hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M) |
---|---|---|---|
2493f1ba51af87cc45ff2f8d618ed01fb94d5213.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ioKernels.cu
*
* Created on: Mar 31, 2010
* Author: wen
*/
#include "../includes/ioKernels.h"
#include "commonCUDAKernels.cu"
__global__ void updatePFPCSynIO(float *synWPFPC, unsigned char *historyGR, int binPitch, unsigned short binN, int offset, unsigned char doLTD)
{
int i=blockIdx.x*blockDim.x+threadIdx.x+offset;
unsigned char *checkBinGR=(unsigned char *)((char *)historyGR+binN*binPitch);
synWPFPC[i]=synWPFPC[i]+checkBinGR[i]*(doLTD*PFPCLTDDECPF+(!doLTD)*PFPCLTPINCPF);
synWPFPC[i]=(synWPFPC[i]>0)*synWPFPC[i];
synWPFPC[i]=(synWPFPC[i]>1)+(synWPFPC[i]<=1)*synWPFPC[i];
}
void runIOKernels()
{
for(int i=0; i<NUMIO; i++)
{
hipLaunchKernelGGL(( updatePFPCSynIO), dim3(CUDAGRIONUMTBLOCK), dim3(CUDAGRIONUMTHREAD), 0, 0, pfSynWeightPCGPU, historyGRGPU, histGRGPUPitch, (histBinNGR+1)%NUMHISTBINSGR, i*(NUMGR/NUMIO), plasticityPFPCTimerIO[i]<0);
//<<<CUDAGRIONUMTBLOCK, CUDAGRIONUMTHREAD>>>
plasticityPFPCTimerIO[i]=plasticityPFPCTimerIO[i]+HISTBINWIDTHGR;
}
}
| 2493f1ba51af87cc45ff2f8d618ed01fb94d5213.cu | /*
* ioKernels.cu
*
* Created on: Mar 31, 2010
* Author: wen
*/
#include "../includes/ioKernels.h"
#include "commonCUDAKernels.cu"
__global__ void updatePFPCSynIO(float *synWPFPC, unsigned char *historyGR, int binPitch, unsigned short binN, int offset, unsigned char doLTD)
{
int i=blockIdx.x*blockDim.x+threadIdx.x+offset;
unsigned char *checkBinGR=(unsigned char *)((char *)historyGR+binN*binPitch);
synWPFPC[i]=synWPFPC[i]+checkBinGR[i]*(doLTD*PFPCLTDDECPF+(!doLTD)*PFPCLTPINCPF);
synWPFPC[i]=(synWPFPC[i]>0)*synWPFPC[i];
synWPFPC[i]=(synWPFPC[i]>1)+(synWPFPC[i]<=1)*synWPFPC[i];
}
void runIOKernels()
{
for(int i=0; i<NUMIO; i++)
{
updatePFPCSynIO<<<CUDAGRIONUMTBLOCK, CUDAGRIONUMTHREAD>>>(pfSynWeightPCGPU, historyGRGPU, histGRGPUPitch, (histBinNGR+1)%NUMHISTBINSGR, i*(NUMGR/NUMIO), plasticityPFPCTimerIO[i]<0);
//<<<CUDAGRIONUMTBLOCK, CUDAGRIONUMTHREAD>>>
plasticityPFPCTimerIO[i]=plasticityPFPCTimerIO[i]+HISTBINWIDTHGR;
}
}
|
bd1d750efa195597a0985e66a3333665d30dad30.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "accel_update.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nx = 1;
int ny = 1;
double dx2inv = 1;
double dy2inv = 1;
double *d_z = NULL;
hipMalloc(&d_z, XSIZE*YSIZE);
double *d_a = NULL;
hipMalloc(&d_a, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
accel_update), dim3(gridBlock),dim3(threadBlock), 0, 0, nx,ny,dx2inv,dy2inv,d_z,d_a);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
accel_update), dim3(gridBlock),dim3(threadBlock), 0, 0, nx,ny,dx2inv,dy2inv,d_z,d_a);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
accel_update), dim3(gridBlock),dim3(threadBlock), 0, 0, nx,ny,dx2inv,dy2inv,d_z,d_a);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bd1d750efa195597a0985e66a3333665d30dad30.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "accel_update.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nx = 1;
int ny = 1;
double dx2inv = 1;
double dy2inv = 1;
double *d_z = NULL;
cudaMalloc(&d_z, XSIZE*YSIZE);
double *d_a = NULL;
cudaMalloc(&d_a, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
accel_update<<<gridBlock,threadBlock>>>(nx,ny,dx2inv,dy2inv,d_z,d_a);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
accel_update<<<gridBlock,threadBlock>>>(nx,ny,dx2inv,dy2inv,d_z,d_a);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
accel_update<<<gridBlock,threadBlock>>>(nx,ny,dx2inv,dy2inv,d_z,d_a);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
184adcab9a6e185e5dd568472dd51d3faa2ffdf0.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <random>
#include <sys/time.h>
#include <rocblas.h>
static const char *_cudaGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#define cuBLASErrChk(ans) { cuBLASAssert((ans), __FILE__, __LINE__); }
inline void cuBLASAssert(hipblasStatus_t code, const char *file, int line, bool abort=true)
{
if (code != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", _cudaGetErrorEnum(code), file, line);
if (abort) exit(code);
}
}
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
struct config {
int AH;
int AW;
int BH;
int BW;
int CH;
int CW;
float alpha;
float beta;
bool do_test;
};
/***************************************
* Device code "matmul"
**************************************/
/***************************************
* Host code "matmul"
**************************************/
float* host_mat_mul(const float* A, const float* B, const float* C, const struct config conf) {
printf("[Kernel] Run kernal\n");
/*** Initialize device memory ***/
size_t size_A = sizeof(float)*conf.AH*conf.AW;
size_t size_B = sizeof(float)*conf.BH*conf.BW;
size_t size_C = sizeof(float)*conf.CH*conf.CW;
float *d_A, *d_B, *d_C;
float *result = (float *) malloc (conf.CH*conf.CW*sizeof(float));
cudaErrChk (hipMalloc ((void**)(&d_A), size_A));
cudaErrChk (hipMalloc ((void**)(&d_B), size_B));
cudaErrChk (hipMalloc ((void**)(&d_C), size_C));
cudaErrChk (hipMemcpy (d_A, A, size_A, hipMemcpyHostToDevice));
cudaErrChk (hipMemcpy (d_B, B, size_B, hipMemcpyHostToDevice));
cudaErrChk (hipMemcpy (d_C, C, size_C, hipMemcpyHostToDevice));
cudaErrChk (hipDeviceSynchronize ())
/*** Setup execution config ***/
hipblasHandle_t handle;
cuBLASErrChk (hipblasCreate (&handle));
int m=conf.CH, n=conf.CW, k=conf.AW;
const float *alpha=&(conf.alpha), *beta=&(conf.beta);
/*** Run CUDA kernel ***/
hipEvent_t start, stop;
cudaErrChk(hipEventCreate(&start));
cudaErrChk(hipEventCreate(&stop));
cudaErrChk(hipEventRecord(start, NULL));
// Main body
cuBLASErrChk (hipblasSgemm (handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, alpha, d_B, n, d_A, k, beta, d_C, n));
// End of main body
cudaErrChk(hipEventRecord(stop, NULL));
cudaErrChk(hipEventSynchronize(stop));
float msec_total = 0.0f;
float gflo = conf.CH*conf.CW*(2.0*conf.AW+2)*1e-9;
cudaErrChk(hipEventElapsedTime(&msec_total, start, stop));
printf(" Elaped time: %.4f msec\n", msec_total);
printf(" gFlops : %.4f gflops\n", gflo/(msec_total*1e-3));
cudaErrChk (hipMemcpy(result, d_C, size_C, hipMemcpyDeviceToHost));
cudaErrChk (hipDeviceSynchronize ())
cudaErrChk (hipFree (d_A));
cudaErrChk (hipFree (d_B));
cudaErrChk (hipFree (d_C));
cuBLASErrChk (hipblasDestroy (handle));
return result;
}
/****************************************
* Helper functions for host
****************************************/
const struct config host_get_cmd_args(int argc, char** argv) {
int a=100, b=100, c=100;
float alpha=1.0f, beta=0.0f;
bool do_test = false;
if (argc >= 2)
do_test = (bool)atoi(argv[1]);
if (argc >= 7) {
a = atoi(argv[2]);
b = atoi(argv[3]);
c = atoi(argv[4]);
alpha = atof(argv[5]);
beta = atof(argv[6]);
}
struct config conf = {
a,
b,
b,
c,
a,
c,
alpha,
beta,
do_test
};
printf("\n================================================\n");
printf("CUDA implementaion of SGEMM\n");
printf(" args: ./matmul [test] [a, b, c, alpha, beta]\n");
printf(" C[a, c] = alpha * A[a, b] @ B[b, c] + beta * C[a, c]\n");
printf(" C[%d, %d] = %f * A[%d, %d] @ B[%d, %d] + %f * C[%d, %d]\n", a,c,alpha,a,b,b,c,beta,a,c);
printf("================================================\n\n");
return conf;
}
void host_initialize(float *mem, const int H, const int W) {
for (int i=0; i<H; i++) {
for (int j=0; j<W; j++) {
mem[i*W+j] = (float)(rand()%100);
}
}
}
void host_test(const float *A, const float *B, const float *C, const float * result, const struct config conf) {
if (conf.do_test == false) {
printf("[TEST] Test skipped..\n");
return;
}
printf("[TEST] Test start..\n");
float alpha=conf.alpha, beta=conf.beta;
int len_k = conf.AW;
for (int i=0; i<conf.CH; i++) {
for (int j=0; j<conf.CW; j++) {
float sum = 0;
for (int k=0; k<len_k; k++) {
sum += A[i*conf.AW+k]*B[k*conf.BW+j];
}
sum = alpha*sum+beta*C[i*conf.CW+j];
if (sum != result[i*conf.CW+j]){
printf(" [ERROR] C[%d][%d] = %.f != %f\n", i, j, result[i*conf.CW+j], sum);
printf(" Test failed...!\n");
return;
}
}
}
printf(" Test passed!!\n");
return;
}
/***************************************
* Main function
**************************************/
int main(int argc, char** argv) {
/*** Program configuration ***/
const struct config conf = host_get_cmd_args(argc, argv);
srand(0);
/*** Initialize Data ***/
float *A = (float *) malloc (conf.AH*conf.AW*sizeof(float));
float *B = (float *) malloc (conf.BH*conf.BW*sizeof(float));
float *C = (float *) calloc (conf.CH*conf.CW,sizeof(float));
host_initialize(A, conf.AH, conf.AW);
host_initialize(B, conf.BH, conf.BW);
host_initialize(C, conf.CH, conf.CW);
size_t total_size = (size_t)(conf.AH*conf.AW*sizeof(float) + conf.BH*conf.BW*sizeof(float) + 2.0*conf.CH*conf.CW*sizeof(float));
printf("[Mem] Total size of matrices : %.3fGB\n", total_size*1e-9);
/*** Run matmul ***/
float* result = host_mat_mul (A, B, C, conf);
/*** Test result ***/
host_test(A, B, C, result, conf);
/*** Finalize ***/
free (A);
free (B);
free (C);
free (result);
return 0;
}
| 184adcab9a6e185e5dd568472dd51d3faa2ffdf0.cu |
#include <cstdio>
#include <cstdlib>
#include <random>
#include <sys/time.h>
#include <cublas_v2.h>
static const char *_cudaGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#define cuBLASErrChk(ans) { cuBLASAssert((ans), __FILE__, __LINE__); }
inline void cuBLASAssert(cublasStatus_t code, const char *file, int line, bool abort=true)
{
if (code != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", _cudaGetErrorEnum(code), file, line);
if (abort) exit(code);
}
}
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
struct config {
int AH;
int AW;
int BH;
int BW;
int CH;
int CW;
float alpha;
float beta;
bool do_test;
};
/***************************************
* Device code "matmul"
**************************************/
/***************************************
* Host code "matmul"
**************************************/
float* host_mat_mul(const float* A, const float* B, const float* C, const struct config conf) {
printf("[Kernel] Run kernal\n");
/*** Initialize device memory ***/
size_t size_A = sizeof(float)*conf.AH*conf.AW;
size_t size_B = sizeof(float)*conf.BH*conf.BW;
size_t size_C = sizeof(float)*conf.CH*conf.CW;
float *d_A, *d_B, *d_C;
float *result = (float *) malloc (conf.CH*conf.CW*sizeof(float));
cudaErrChk (cudaMalloc ((void**)(&d_A), size_A));
cudaErrChk (cudaMalloc ((void**)(&d_B), size_B));
cudaErrChk (cudaMalloc ((void**)(&d_C), size_C));
cudaErrChk (cudaMemcpy (d_A, A, size_A, cudaMemcpyHostToDevice));
cudaErrChk (cudaMemcpy (d_B, B, size_B, cudaMemcpyHostToDevice));
cudaErrChk (cudaMemcpy (d_C, C, size_C, cudaMemcpyHostToDevice));
cudaErrChk (cudaDeviceSynchronize ())
/*** Setup execution config ***/
cublasHandle_t handle;
cuBLASErrChk (cublasCreate (&handle));
int m=conf.CH, n=conf.CW, k=conf.AW;
const float *alpha=&(conf.alpha), *beta=&(conf.beta);
/*** Run CUDA kernel ***/
cudaEvent_t start, stop;
cudaErrChk(cudaEventCreate(&start));
cudaErrChk(cudaEventCreate(&stop));
cudaErrChk(cudaEventRecord(start, NULL));
// Main body
cuBLASErrChk (cublasSgemm (handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, alpha, d_B, n, d_A, k, beta, d_C, n));
// End of main body
cudaErrChk(cudaEventRecord(stop, NULL));
cudaErrChk(cudaEventSynchronize(stop));
float msec_total = 0.0f;
float gflo = conf.CH*conf.CW*(2.0*conf.AW+2)*1e-9;
cudaErrChk(cudaEventElapsedTime(&msec_total, start, stop));
printf(" Elaped time: %.4f msec\n", msec_total);
printf(" gFlops : %.4f gflops\n", gflo/(msec_total*1e-3));
cudaErrChk (cudaMemcpy(result, d_C, size_C, cudaMemcpyDeviceToHost));
cudaErrChk (cudaDeviceSynchronize ())
cudaErrChk (cudaFree (d_A));
cudaErrChk (cudaFree (d_B));
cudaErrChk (cudaFree (d_C));
cuBLASErrChk (cublasDestroy (handle));
return result;
}
/****************************************
* Helper functions for host
****************************************/
const struct config host_get_cmd_args(int argc, char** argv) {
int a=100, b=100, c=100;
float alpha=1.0f, beta=0.0f;
bool do_test = false;
if (argc >= 2)
do_test = (bool)atoi(argv[1]);
if (argc >= 7) {
a = atoi(argv[2]);
b = atoi(argv[3]);
c = atoi(argv[4]);
alpha = atof(argv[5]);
beta = atof(argv[6]);
}
struct config conf = {
a,
b,
b,
c,
a,
c,
alpha,
beta,
do_test
};
printf("\n================================================\n");
printf("CUDA implementaion of SGEMM\n");
printf(" args: ./matmul [test] [a, b, c, alpha, beta]\n");
printf(" C[a, c] = alpha * A[a, b] @ B[b, c] + beta * C[a, c]\n");
printf(" C[%d, %d] = %f * A[%d, %d] @ B[%d, %d] + %f * C[%d, %d]\n", a,c,alpha,a,b,b,c,beta,a,c);
printf("================================================\n\n");
return conf;
}
void host_initialize(float *mem, const int H, const int W) {
for (int i=0; i<H; i++) {
for (int j=0; j<W; j++) {
mem[i*W+j] = (float)(rand()%100);
}
}
}
void host_test(const float *A, const float *B, const float *C, const float * result, const struct config conf) {
if (conf.do_test == false) {
printf("[TEST] Test skipped..\n");
return;
}
printf("[TEST] Test start..\n");
float alpha=conf.alpha, beta=conf.beta;
int len_k = conf.AW;
for (int i=0; i<conf.CH; i++) {
for (int j=0; j<conf.CW; j++) {
float sum = 0;
for (int k=0; k<len_k; k++) {
sum += A[i*conf.AW+k]*B[k*conf.BW+j];
}
sum = alpha*sum+beta*C[i*conf.CW+j];
if (sum != result[i*conf.CW+j]){
printf(" [ERROR] C[%d][%d] = %.f != %f\n", i, j, result[i*conf.CW+j], sum);
printf(" Test failed...!\n");
return;
}
}
}
printf(" Test passed!!\n");
return;
}
/***************************************
* Main function
**************************************/
int main(int argc, char** argv) {
/*** Program configuration ***/
const struct config conf = host_get_cmd_args(argc, argv);
srand(0);
/*** Initialize Data ***/
float *A = (float *) malloc (conf.AH*conf.AW*sizeof(float));
float *B = (float *) malloc (conf.BH*conf.BW*sizeof(float));
float *C = (float *) calloc (conf.CH*conf.CW,sizeof(float));
host_initialize(A, conf.AH, conf.AW);
host_initialize(B, conf.BH, conf.BW);
host_initialize(C, conf.CH, conf.CW);
size_t total_size = (size_t)(conf.AH*conf.AW*sizeof(float) + conf.BH*conf.BW*sizeof(float) + 2.0*conf.CH*conf.CW*sizeof(float));
printf("[Mem] Total size of matrices : %.3fGB\n", total_size*1e-9);
/*** Run matmul ***/
float* result = host_mat_mul (A, B, C, conf);
/*** Test result ***/
host_test(A, B, C, result, conf);
/*** Finalize ***/
free (A);
free (B);
free (C);
free (result);
return 0;
}
|
bae3ec95bcdc8c01440582c4bf98be429b651fd4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calculate_A_ch_1_2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *rho = NULL;
hipMalloc(&rho, XSIZE*YSIZE);
float *dz = NULL;
hipMalloc(&dz, XSIZE*YSIZE);
float *s_a = NULL;
hipMalloc(&s_a, XSIZE*YSIZE);
float *xx_or_yy = NULL;
hipMalloc(&xx_or_yy, XSIZE*YSIZE);
float *s_b = NULL;
hipMalloc(&s_b, XSIZE*YSIZE);
float K = 1;
int npix = 1;
int nchannels = 1;
int nimages = 1;
float *A_ch = NULL;
hipMalloc(&A_ch, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
calculate_A_ch_1_2), dim3(gridBlock),dim3(threadBlock), 0, 0, rho,dz,s_a,xx_or_yy,s_b,K,npix,nchannels,nimages,A_ch);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
calculate_A_ch_1_2), dim3(gridBlock),dim3(threadBlock), 0, 0, rho,dz,s_a,xx_or_yy,s_b,K,npix,nchannels,nimages,A_ch);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
calculate_A_ch_1_2), dim3(gridBlock),dim3(threadBlock), 0, 0, rho,dz,s_a,xx_or_yy,s_b,K,npix,nchannels,nimages,A_ch);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bae3ec95bcdc8c01440582c4bf98be429b651fd4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calculate_A_ch_1_2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *rho = NULL;
cudaMalloc(&rho, XSIZE*YSIZE);
float *dz = NULL;
cudaMalloc(&dz, XSIZE*YSIZE);
float *s_a = NULL;
cudaMalloc(&s_a, XSIZE*YSIZE);
float *xx_or_yy = NULL;
cudaMalloc(&xx_or_yy, XSIZE*YSIZE);
float *s_b = NULL;
cudaMalloc(&s_b, XSIZE*YSIZE);
float K = 1;
int npix = 1;
int nchannels = 1;
int nimages = 1;
float *A_ch = NULL;
cudaMalloc(&A_ch, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calculate_A_ch_1_2<<<gridBlock,threadBlock>>>(rho,dz,s_a,xx_or_yy,s_b,K,npix,nchannels,nimages,A_ch);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calculate_A_ch_1_2<<<gridBlock,threadBlock>>>(rho,dz,s_a,xx_or_yy,s_b,K,npix,nchannels,nimages,A_ch);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calculate_A_ch_1_2<<<gridBlock,threadBlock>>>(rho,dz,s_a,xx_or_yy,s_b,K,npix,nchannels,nimages,A_ch);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
91aaa4eb2f9ab4fd0e6309adab5b4e6c81692ee1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include <gasal2/gasal.h>
#define CORE_GLOBAL_COMPUTE() \
uint32_t gbase = (gpac >> l) & 15;\
DEV_GET_SUB_SCORE_GLOBAL(subScore, rbase, gbase);\
int32_t tmp_hm = p[m] + subScore;\
h[m] = max(tmp_hm, f[m]);\
h[m] = max(h[m], e);\
f[m] = (tmp_hm - _cudaGapOE) > (f[m] - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (f[m] - _cudaGapExtend);\
e = (tmp_hm - _cudaGapOE) > (e - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (e - _cudaGapExtend);\
p[m] = h[m-1];\
#define CORE_GLOBAL_COMPUTE_TB(direction_reg) \
uint32_t gbase = (gpac >> l) & 15;\
DEV_GET_SUB_SCORE_GLOBAL(subScore, rbase, gbase);\
int32_t tmp_hm = p[m] + subScore;\
uint32_t m_or_x = tmp_hm >= p[m] ? 0 : 1;\
h[m] = max(tmp_hm, f[m]);\
h[m] = max(h[m], e);\
direction_reg |= h[m] == tmp_hm ? m_or_x << (28 - ((m - 1) << 2)) : (h[m] == f[m] ? (uint32_t)3 << (28 - ((m - 1) << 2)) : (uint32_t)2 << (28 - ((m - 1) << 2)));\
direction_reg |= (tmp_hm - _cudaGapOE) > (f[m] - _cudaGapExtend) ? (uint32_t)0 : (uint32_t)1 << (31 - ((m - 1) << 2));\
f[m] = (tmp_hm - _cudaGapOE) > (f[m] - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (f[m] - _cudaGapExtend);\
direction_reg|= (tmp_hm - _cudaGapOE) > (e - _cudaGapExtend) ? (uint32_t)0 : (uint32_t)1 << (30 - ((m - 1) << 2));\
e = (tmp_hm - _cudaGapOE) > (e - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (e - _cudaGapExtend);\
p[m] = h[m-1];\
template <CompStart S>
__global__ void gasal_global_kernel(uint32_t *packed_query_batch, uint32_t *packed_target_batch, uint32_t *query_batch_lens, uint32_t *target_batch_lens, uint32_t *query_batch_offsets, uint32_t *target_batch_offsets, gasal_res_t *device_res, uint4 *packed_tb_matrices, int n_tasks)
{
int32_t i, j, k, l, m;
int32_t u = 0, r = 0;
int32_t e;
int32_t subScore;
int tile_no = 0;
int32_t ridx;
short2 HD;
const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;//thread ID
if (tid >= n_tasks) return;
uint32_t packed_target_batch_idx = target_batch_offsets[tid] >> 3;//starting index of the target_batch sequence
uint32_t packed_query_batch_idx = query_batch_offsets[tid] >> 3;//starting index of the query_batch sequence
uint32_t read_len = query_batch_lens[tid];
uint32_t ref_len = target_batch_lens[tid];
uint32_t query_batch_regs = (read_len >> 3) + (read_len&7 ? 1 : 0);//number of 32-bit words holding sequence of query_batch
uint32_t target_batch_regs = (ref_len >> 3) + (ref_len&7 ? 1 : 0);//number of 32-bit words holding sequence of target_batch
//-------arrays to save intermediate values----------------
short2 global[MAX_QUERY_LEN];
int32_t h[9];
int32_t f[9];
int32_t p[9];
int32_t max_h[9];
//----------------------------------------------------------
global[0] = make_short2(0, MINUS_INF);
for (i = 1; i < MAX_QUERY_LEN; i++) {
global[i] = make_short2(-(_cudaGapO + (_cudaGapExtend*(i))), MINUS_INF);
}
h[u++] = 0;
p[r++] = 0;
for (i = 0; i < target_batch_regs; i++) { //target_batch sequence in rows, for all WORDS (i=WORD index)
ridx = 0;
for (m = 1; m < 9; m++, u++, r++) {
h[m] = -(_cudaGapO + (_cudaGapExtend*(u)));
f[m] = MINUS_INF;
p[m] = r == 1 ? 0 : -(_cudaGapO + (_cudaGapExtend*(r-1)));
}
register uint32_t gpac =packed_target_batch[packed_target_batch_idx + i];//load 8 packed bases from target_batch sequence
for (j = 0; j < query_batch_regs; /*++j*/ j+=1) { //query_batch sequence in columns, for all WORDS (j=WORD index).
register uint32_t rpac =packed_query_batch[packed_query_batch_idx + j];//load 8 packed bases from query_batch sequence
//--------------compute a tile of 8x8 cells-------------------
if (S==CompStart::WITH_TB) {
uint4 direction = make_uint4(0,0,0,0);
uint32_t rbase = (rpac >> 28) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.x);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 24) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.y);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 20) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.z);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 16) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.w);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
packed_tb_matrices[(tile_no*n_tasks) + tid] = direction;
tile_no++;
direction = make_uint4(0,0,0,0);
rbase = (rpac >> 12) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.x);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 8) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.y);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 4) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.z);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = rpac & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.w);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
packed_tb_matrices[(tile_no*n_tasks) + tid] = direction;
tile_no++;
}
else{
for (k = 28; k >= 0; k -= 4) {
uint32_t rbase = (rpac >> k) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
//----------------------------------------------------------
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE();
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
//----------------------------------------------
}
}
//------------------------------------------------------------------
}
}
device_res->aln_score[tid] = max_h[8 - ((target_batch_regs << 3) - (ref_len))];//copy the max score to the output array in the GPU mem
}
| 91aaa4eb2f9ab4fd0e6309adab5b4e6c81692ee1.cu | #pragma once
#include <gasal2/gasal.h>
#define CORE_GLOBAL_COMPUTE() \
uint32_t gbase = (gpac >> l) & 15;\
DEV_GET_SUB_SCORE_GLOBAL(subScore, rbase, gbase);\
int32_t tmp_hm = p[m] + subScore;\
h[m] = max(tmp_hm, f[m]);\
h[m] = max(h[m], e);\
f[m] = (tmp_hm - _cudaGapOE) > (f[m] - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (f[m] - _cudaGapExtend);\
e = (tmp_hm - _cudaGapOE) > (e - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (e - _cudaGapExtend);\
p[m] = h[m-1];\
#define CORE_GLOBAL_COMPUTE_TB(direction_reg) \
uint32_t gbase = (gpac >> l) & 15;\
DEV_GET_SUB_SCORE_GLOBAL(subScore, rbase, gbase);\
int32_t tmp_hm = p[m] + subScore;\
uint32_t m_or_x = tmp_hm >= p[m] ? 0 : 1;\
h[m] = max(tmp_hm, f[m]);\
h[m] = max(h[m], e);\
direction_reg |= h[m] == tmp_hm ? m_or_x << (28 - ((m - 1) << 2)) : (h[m] == f[m] ? (uint32_t)3 << (28 - ((m - 1) << 2)) : (uint32_t)2 << (28 - ((m - 1) << 2)));\
direction_reg |= (tmp_hm - _cudaGapOE) > (f[m] - _cudaGapExtend) ? (uint32_t)0 : (uint32_t)1 << (31 - ((m - 1) << 2));\
f[m] = (tmp_hm - _cudaGapOE) > (f[m] - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (f[m] - _cudaGapExtend);\
direction_reg|= (tmp_hm - _cudaGapOE) > (e - _cudaGapExtend) ? (uint32_t)0 : (uint32_t)1 << (30 - ((m - 1) << 2));\
e = (tmp_hm - _cudaGapOE) > (e - _cudaGapExtend) ? (tmp_hm - _cudaGapOE) : (e - _cudaGapExtend);\
p[m] = h[m-1];\
template <CompStart S>
__global__ void gasal_global_kernel(uint32_t *packed_query_batch, uint32_t *packed_target_batch, uint32_t *query_batch_lens, uint32_t *target_batch_lens, uint32_t *query_batch_offsets, uint32_t *target_batch_offsets, gasal_res_t *device_res, uint4 *packed_tb_matrices, int n_tasks)
{
int32_t i, j, k, l, m;
int32_t u = 0, r = 0;
int32_t e;
int32_t subScore;
int tile_no = 0;
int32_t ridx;
short2 HD;
const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;//thread ID
if (tid >= n_tasks) return;
uint32_t packed_target_batch_idx = target_batch_offsets[tid] >> 3;//starting index of the target_batch sequence
uint32_t packed_query_batch_idx = query_batch_offsets[tid] >> 3;//starting index of the query_batch sequence
uint32_t read_len = query_batch_lens[tid];
uint32_t ref_len = target_batch_lens[tid];
uint32_t query_batch_regs = (read_len >> 3) + (read_len&7 ? 1 : 0);//number of 32-bit words holding sequence of query_batch
uint32_t target_batch_regs = (ref_len >> 3) + (ref_len&7 ? 1 : 0);//number of 32-bit words holding sequence of target_batch
//-------arrays to save intermediate values----------------
short2 global[MAX_QUERY_LEN];
int32_t h[9];
int32_t f[9];
int32_t p[9];
int32_t max_h[9];
//----------------------------------------------------------
global[0] = make_short2(0, MINUS_INF);
for (i = 1; i < MAX_QUERY_LEN; i++) {
global[i] = make_short2(-(_cudaGapO + (_cudaGapExtend*(i))), MINUS_INF);
}
h[u++] = 0;
p[r++] = 0;
for (i = 0; i < target_batch_regs; i++) { //target_batch sequence in rows, for all WORDS (i=WORD index)
ridx = 0;
for (m = 1; m < 9; m++, u++, r++) {
h[m] = -(_cudaGapO + (_cudaGapExtend*(u)));
f[m] = MINUS_INF;
p[m] = r == 1 ? 0 : -(_cudaGapO + (_cudaGapExtend*(r-1)));
}
register uint32_t gpac =packed_target_batch[packed_target_batch_idx + i];//load 8 packed bases from target_batch sequence
for (j = 0; j < query_batch_regs; /*++j*/ j+=1) { //query_batch sequence in columns, for all WORDS (j=WORD index).
register uint32_t rpac =packed_query_batch[packed_query_batch_idx + j];//load 8 packed bases from query_batch sequence
//--------------compute a tile of 8x8 cells-------------------
if (S==CompStart::WITH_TB) {
uint4 direction = make_uint4(0,0,0,0);
uint32_t rbase = (rpac >> 28) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.x);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 24) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.y);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 20) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.z);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 16) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.w);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
packed_tb_matrices[(tile_no*n_tasks) + tid] = direction;
tile_no++;
direction = make_uint4(0,0,0,0);
rbase = (rpac >> 12) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.x);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 8) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.y);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = (rpac >> 4) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.z);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
rbase = rpac & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE_TB(direction.w);
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
packed_tb_matrices[(tile_no*n_tasks) + tid] = direction;
tile_no++;
}
else{
for (k = 28; k >= 0; k -= 4) {
uint32_t rbase = (rpac >> k) & 15;//get a base from query_batch sequence
//------------load intermediate values----------------------
HD = global[ridx];
h[0] = HD.x;
e = HD.y;
//----------------------------------------------------------
#pragma unroll 8
for (l = 28, m = 1; m < 9; l -= 4, m++) {
CORE_GLOBAL_COMPUTE();
}
//--------------save intermediate values-------------------------
HD.x = h[m-1];
HD.y = e;//max(e, 0);
global[ridx] = HD;
ridx++;
//--------------------------------------------------------------
//------the last column of DP matrix------------
if (ridx == read_len) {
for (m = 1; m < 9; m++) {
max_h[m] = h[m];
}
}
//----------------------------------------------
}
}
//------------------------------------------------------------------
}
}
device_res->aln_score[tid] = max_h[8 - ((target_batch_regs << 3) - (ref_len))];//copy the max score to the output array in the GPU mem
}
|
021403629c244f8812f484a13fd1ac4db52cf5f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void test(float* nonSmoothed, float* smoothed, int* mask, int nhalf) {
int i = threadIdx.x + blockDim.x*blockIdx.x;
int diff;
if (i < nhalf) {
diff = fabs(nonSmoothed[i] - smoothed[i]/nhalf);
mask[i] = (diff > 0.23) ? 1 : 0; // WHAT THRESHOLD TO USE?? different behaviour as opposed to CPU version!
}
} | 021403629c244f8812f484a13fd1ac4db52cf5f4.cu | #include "includes.h"
__global__ void test(float* nonSmoothed, float* smoothed, int* mask, int nhalf) {
int i = threadIdx.x + blockDim.x*blockIdx.x;
int diff;
if (i < nhalf) {
diff = fabs(nonSmoothed[i] - smoothed[i]/nhalf);
mask[i] = (diff > 0.23) ? 1 : 0; // WHAT THRESHOLD TO USE?? different behaviour as opposed to CPU version!
}
} |
e91b33fc21c11e3aedf78acf4e43f35073e1971e.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_fvm.cuh"
#include <hip/hip_runtime.h>
//#include "cuPrintf.hip"
int inputMesh(CELL **cell,NODE **node)
{
FILE *fp;
int icell,inode;
if( (fp=fopen("2822-3k.dat","r"))==NULL )
{
printf("\nCannot open the mesh data file!\n");
exit(0);
}
fscanf(fp,"%d %d",&Nnode,&Ncell);
*cell =(CELL *)calloc(Ncell+1,sizeof(CELL));
*node =(NODE *)calloc(Nnode+1,sizeof(NODE));
for(inode=1;inode<=Nnode;inode++)
fscanf(fp,"%lf",&(*node)[inode].x);
for(inode=1;inode<=Nnode;inode++)
fscanf(fp,"%lf",&(*node)[inode].y);
for(icell=1;icell<=Ncell;icell++)
for(int i=0;i<3;i++)
{
fscanf(fp,"%d",&(*cell)[icell].Point[i]);
// &(*cell+icell)->Point[i]--;
}
fclose(fp);
if((fp=fopen("input.dat","r"))==NULL)
{
printf("\nCannot open input file!\n");
exit(0);
}
fscanf(fp,"%lf %lf %lf %lf %lf %lf %lf %d",&MA,&ALPHA,&GAMA,&PIN,&TIN,&CFL,&EPSL,&MAXSTEP);
fclose(fp);
printf("inputData finished!\n");
return 0;
}
int CalculationPTA(W &WW){
double U,V,E;
U=WW.density_U/WW.density;
V=WW.density_V/WW.density;
E=WW.density_E/WW.density-0.5*(U*U+V*V);
WW.P=(GAMA-1.0)*WW.density*E;
WW.A=sqrt(GAMA*WW.P/WW.density);
WW.T=(GAMA-1.0)*E/R;
return 0;
}
int Initialization(W *w)
{
int icell;
double rou_U,rou_V,rou_E;
ALPHA=ALPHA*PI/180.0;
AIN=sqrt(GAMA*R*TIN);
VIN=MA*AIN;
ROUIN=PIN/(R*TIN);
UI=cos(ALPHA);
VI=sin(ALPHA);
rou_U=cos(ALPHA);
rou_V=sin(ALPHA);
rou_E=R*TIN/(VIN*VIN)/(GAMA-1.0)+0.50;
for(icell=1;icell<=Ncell;icell++)
{
w[icell].density =1.0;
w[icell].density_U=rou_U;
w[icell].density_V=rou_V;
w[icell].density_E=rou_E;
CalculationPTA(w[icell]);
}
PIS=w[1].P;
return 0;
}
double Calc_area(double x1,double y1,double x2,double y2,double x3,double y3)
{
return(0.5*((y3-y1)*(x2-x1)-(y2-y1)*(x3-x1)));
}
int CalculationMeshGeo(CELL *cell,NODE *node,EDGE *edge,EDGE *WallEdge)
{
int icell,iedge;
int i,j,nn;
int Findedge,Findcell,ci;
int IP[3]={1,2,0};
for(icell=1;icell<=Ncell;icell++){
CellArea[icell]=Calc_area(node[cell[icell].Point[0]].x,node[cell[icell].Point[0]].y,
node[cell[icell].Point[1]].x,node[cell[icell].Point[1]].y,
node[cell[icell].Point[2]].x,node[cell[icell].Point[2]].y);
if( CellArea[icell]<=0.0)printf("CellArea<0! %d %f\n",icell, CellArea[icell]);
cell[icell].center[0]=(node[cell[icell].Point[0]].x+node[cell[icell].Point[1]].x+node[cell[icell].Point[2]].x)/3.0;
cell[icell].center[1]=(node[cell[icell].Point[0]].y+node[cell[icell].Point[1]].y+node[cell[icell].Point[2]].y)/3.0;
}
FarBoundNum=0;
WallBoundNum=0;
Nedge=0;
for(icell=1;icell<=Ncell;icell++)
{
if(icell%100 ==0)printf("icell %d\n",icell);
for(i=0;i<3;i++)
{
int ie1=IP[i];
int ie2=IP[ie1];
int N1=cell[icell].Point[ie1];
int N2=cell[icell].Point[ie2];
Findedge=0;
for(iedge=1;iedge<=Nedge;iedge++)
{
if( ( edge[iedge].node1==N1 && edge[iedge].node2==N2 ) || ( edge[iedge].node1==N2 && edge[iedge].node2==N1 ) )
{
Findedge=1;
break;
}
}
cell[icell].neighbor[i].celledge=iedge;
if(Findedge==0)
{
Nedge++;
edge[Nedge].left_cell=icell;
edge[Nedge].node1=N1;
edge[Nedge].node2=N2;
edge[Nedge].vectorx=node[N1].x-node[N2].x;
edge[Nedge].vectory=node[N2].y-node[N1].y;
edge[Nedge].midx = 0.5*(node[N1].x+node[N2].x);
edge[Nedge].midy = 0.5*(node[N1].y+node[N2].y);
edge[Nedge].vectorn= sqrt(edge[Nedge].vectorx*edge[Nedge].vectorx + edge[Nedge].vectory*edge[Nedge].vectory);
Findcell=0;
for(ci=icell+1;ci<=Ncell;ci++)
{
for(j=0;j<3;j++)
{
ie1=IP[j];
ie2=IP[ie1];
int NN1=cell[ci].Point[ie1];
int NN2=cell[ci].Point[ie2];
if( (NN1==N1&&NN2==N2) || (NN1==N2&&NN2==N1) )
{
Findcell=1;
break;
}
}
if(Findcell==1)
break;
}
if(Findcell==1){
edge[iedge].right_cell=ci;
edge[iedge].ELog=0;
}
else{
edge[iedge].right_cell=-1;
if( fabs(edge[iedge].midx) <2.0 && fabs(edge[iedge].midy) <2.0){
edge[iedge].ELog=2;
node[edge[iedge].node1].NLog=2;
node[edge[iedge].node2].NLog=2;
WallBoundNum++;
edge[iedge].wallid=WallBoundNum;
WallEdge[WallBoundNum]=edge[iedge];
}
else{
edge[iedge].ELog=1;
FarBoundNum++;
edge[iedge].farfieldid=FarBoundNum;
}
}
}
}
nn=3;
for(i=0;i<3;i++){
cell[icell].neighbor[i].neicell = edge[cell[icell].neighbor[i].celledge].left_cell == icell ?
edge[cell[icell].neighbor[i].celledge].right_cell :
edge[cell[icell].neighbor[i].celledge].left_cell;
if(cell[icell].neighbor[i].neicell == -1){
cell[icell].neighbor[i].neicell *= edge[cell[icell].neighbor[i].celledge].ELog;
nn--;
}
}
cell[icell].CLog=nn;
}
printf("Mesh data Computing's finished!!\n");
return 0;
}
bool InitGPUSet()
{
char GPU[100] = "GPU: ";
hipDeviceProp_t tCard;
int num = 0;
if(hipSuccess == hipGetDeviceCount(&num))
{
for(int i = 0; i < num; ++ i)
{
hipSetDevice(i);
hipGetDeviceProperties(&tCard, i);
puts(strcat(GPU , tCard.name));// strcat returns the concatenated string, which also allows nested use
}
}
else return false;
return true;
}
int Cuda_Init()
{
// if(!InitGPUSet()) puts("device is not ready!");
hipSetDevice(2);
/* cudaPrintfInit();
displayGPU_demo<<<2, 3>>>();
cudaPrintfDisplay(stdout, true);// true: print which thread of which block executed each output statement, in the form [blockID, threadID]; false: print nothing
cudaPrintfEnd();
*/
hipMalloc(&d_cell, (Ncell+1)*sizeof(CELL));
hipMalloc(&d_node, (Ncell+1)*sizeof(NODE));
hipMalloc(&d_edge, (Nedge+1)*sizeof(EDGE));
hipMalloc(&d_w , (Ncell+1)*sizeof(W ));
hipMalloc(&d_CellArea, (Ncell+1)*sizeof(double));
hipMemcpy(d_cell, cell, (Ncell+1)*sizeof(CELL), hipMemcpyHostToDevice);
hipMemcpy(d_node, node, (Nnode+1)*sizeof(NODE), hipMemcpyHostToDevice);
hipMemcpy(d_edge, edge, (Nedge+1)*sizeof(EDGE), hipMemcpyHostToDevice);
hipMemcpy(d_w , w , (Ncell+1)*sizeof(W), hipMemcpyHostToDevice);
hipMemcpy(d_CellArea, CellArea, (Ncell+1)*sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
return 0;
}
| e91b33fc21c11e3aedf78acf4e43f35073e1971e.cu | #include "gpu_fvm.cuh"
#include <cuda.h>
//#include "cuPrintf.cu"
int inputMesh(CELL **cell,NODE **node)
{
FILE *fp;
int icell,inode;
if( (fp=fopen("2822-3k.dat","r"))==NULL )
{
printf("\nCannot open the mesh data file!\n");
exit(0);
}
fscanf(fp,"%d %d",&Nnode,&Ncell);
*cell =(CELL *)calloc(Ncell+1,sizeof(CELL));
*node =(NODE *)calloc(Nnode+1,sizeof(NODE));
for(inode=1;inode<=Nnode;inode++)
fscanf(fp,"%lf",&(*node)[inode].x);
for(inode=1;inode<=Nnode;inode++)
fscanf(fp,"%lf",&(*node)[inode].y);
for(icell=1;icell<=Ncell;icell++)
for(int i=0;i<3;i++)
{
fscanf(fp,"%d",&(*cell)[icell].Point[i]);
// &(*cell+icell)->Point[i]--;
}
fclose(fp);
if((fp=fopen("input.dat","r"))==NULL)
{
printf("\nCannot open input file!\n");
exit(0);
}
fscanf(fp,"%lf %lf %lf %lf %lf %lf %lf %d",&MA,&ALPHA,&GAMA,&PIN,&TIN,&CFL,&EPSL,&MAXSTEP);
fclose(fp);
printf("inputData finished!\n");
return 0;
}
int CalculationPTA(W &WW){
double U,V,E;
U=WW.density_U/WW.density;
V=WW.density_V/WW.density;
E=WW.density_E/WW.density-0.5*(U*U+V*V);
WW.P=(GAMA-1.0)*WW.density*E;
WW.A=sqrt(GAMA*WW.P/WW.density);
WW.T=(GAMA-1.0)*E/R;
return 0;
}
int Initialization(W *w)
{
int icell;
double rou_U,rou_V,rou_E;
ALPHA=ALPHA*PI/180.0;
AIN=sqrt(GAMA*R*TIN);
VIN=MA*AIN;
ROUIN=PIN/(R*TIN);
UI=cos(ALPHA);
VI=sin(ALPHA);
rou_U=cos(ALPHA);
rou_V=sin(ALPHA);
rou_E=R*TIN/(VIN*VIN)/(GAMA-1.0)+0.50;
for(icell=1;icell<=Ncell;icell++)
{
w[icell].density =1.0;
w[icell].density_U=rou_U;
w[icell].density_V=rou_V;
w[icell].density_E=rou_E;
CalculationPTA(w[icell]);
}
PIS=w[1].P;
return 0;
}
double Calc_area(double x1,double y1,double x2,double y2,double x3,double y3)
{
return(0.5*((y3-y1)*(x2-x1)-(y2-y1)*(x3-x1)));
}
int CalculationMeshGeo(CELL *cell,NODE *node,EDGE *edge,EDGE *WallEdge)
{
int icell,iedge;
int i,j,nn;
int Findedge,Findcell,ci;
int IP[3]={1,2,0};
for(icell=1;icell<=Ncell;icell++){
CellArea[icell]=Calc_area(node[cell[icell].Point[0]].x,node[cell[icell].Point[0]].y,
node[cell[icell].Point[1]].x,node[cell[icell].Point[1]].y,
node[cell[icell].Point[2]].x,node[cell[icell].Point[2]].y);
if( CellArea[icell]<=0.0)printf("CellArea<0! %d %f\n",icell, CellArea[icell]);
cell[icell].center[0]=(node[cell[icell].Point[0]].x+node[cell[icell].Point[1]].x+node[cell[icell].Point[2]].x)/3.0;
cell[icell].center[1]=(node[cell[icell].Point[0]].y+node[cell[icell].Point[1]].y+node[cell[icell].Point[2]].y)/3.0;
}
FarBoundNum=0;
WallBoundNum=0;
Nedge=0;
for(icell=1;icell<=Ncell;icell++)
{
if(icell%100 ==0)printf("icell %d\n",icell);
for(i=0;i<3;i++)
{
int ie1=IP[i];
int ie2=IP[ie1];
int N1=cell[icell].Point[ie1];
int N2=cell[icell].Point[ie2];
Findedge=0;
for(iedge=1;iedge<=Nedge;iedge++)
{
if( ( edge[iedge].node1==N1 && edge[iedge].node2==N2 ) || ( edge[iedge].node1==N2 && edge[iedge].node2==N1 ) )
{
Findedge=1;
break;
}
}
cell[icell].neighbor[i].celledge=iedge;
if(Findedge==0)
{
Nedge++;
edge[Nedge].left_cell=icell;
edge[Nedge].node1=N1;
edge[Nedge].node2=N2;
edge[Nedge].vectorx=node[N1].x-node[N2].x;
edge[Nedge].vectory=node[N2].y-node[N1].y;
edge[Nedge].midx = 0.5*(node[N1].x+node[N2].x);
edge[Nedge].midy = 0.5*(node[N1].y+node[N2].y);
edge[Nedge].vectorn= sqrt(edge[Nedge].vectorx*edge[Nedge].vectorx + edge[Nedge].vectory*edge[Nedge].vectory);
Findcell=0;
for(ci=icell+1;ci<=Ncell;ci++)
{
for(j=0;j<3;j++)
{
ie1=IP[j];
ie2=IP[ie1];
int NN1=cell[ci].Point[ie1];
int NN2=cell[ci].Point[ie2];
if( (NN1==N1&&NN2==N2) || (NN1==N2&&NN2==N1) )
{
Findcell=1;
break;
}
}
if(Findcell==1)
break;
}
if(Findcell==1){
edge[iedge].right_cell=ci;
edge[iedge].ELog=0;
}
else{
edge[iedge].right_cell=-1;
if( fabs(edge[iedge].midx) <2.0 && fabs(edge[iedge].midy) <2.0){
edge[iedge].ELog=2;
node[edge[iedge].node1].NLog=2;
node[edge[iedge].node2].NLog=2;
WallBoundNum++;
edge[iedge].wallid=WallBoundNum;
WallEdge[WallBoundNum]=edge[iedge];
}
else{
edge[iedge].ELog=1;
FarBoundNum++;
edge[iedge].farfieldid=FarBoundNum;
}
}
}
}
nn=3;
for(i=0;i<3;i++){
cell[icell].neighbor[i].neicell = edge[cell[icell].neighbor[i].celledge].left_cell == icell ?
edge[cell[icell].neighbor[i].celledge].right_cell :
edge[cell[icell].neighbor[i].celledge].left_cell;
if(cell[icell].neighbor[i].neicell == -1){
cell[icell].neighbor[i].neicell *= edge[cell[icell].neighbor[i].celledge].ELog;
nn--;
}
}
cell[icell].CLog=nn;
}
printf("Mesh data Computing's finished!!\n");
return 0;
}
bool InitGPUSet()
{
char GPU[100] = "GPU: ";
cudaDeviceProp tCard;
int num = 0;
if(cudaSuccess == cudaGetDeviceCount(&num))
{
for(int i = 0; i < num; ++ i)
{
cudaSetDevice(i);
cudaGetDeviceProperties(&tCard, i);
puts(strcat(GPU , tCard.name));// strcat returns the concatenated string, which also allows nested use
}
}
else return false;
return true;
}
int Cuda_Init()
{
// if(!InitGPUSet()) puts("device is not ready!");
cudaSetDevice(2);
/* cudaPrintfInit();
displayGPU_demo<<<2, 3>>>();
cudaPrintfDisplay(stdout, true);// true: print which thread of which block executed each output statement, in the form [blockID, threadID]; false: print nothing
cudaPrintfEnd();
*/
cudaMalloc(&d_cell, (Ncell+1)*sizeof(CELL));
cudaMalloc(&d_node, (Ncell+1)*sizeof(NODE));
cudaMalloc(&d_edge, (Nedge+1)*sizeof(EDGE));
cudaMalloc(&d_w , (Ncell+1)*sizeof(W ));
cudaMalloc(&d_CellArea, (Ncell+1)*sizeof(double));
cudaMemcpy(d_cell, cell, (Ncell+1)*sizeof(CELL), cudaMemcpyHostToDevice);
cudaMemcpy(d_node, node, (Nnode+1)*sizeof(NODE), cudaMemcpyHostToDevice);
cudaMemcpy(d_edge, edge, (Nedge+1)*sizeof(EDGE), cudaMemcpyHostToDevice);
cudaMemcpy(d_w , w , (Ncell+1)*sizeof(W), cudaMemcpyHostToDevice);
cudaMemcpy(d_CellArea, CellArea, (Ncell+1)*sizeof(double), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
return 0;
}
|
cafeb0d40c6de453fcf334643f8f3391ba652086.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Inverse Discrete Cosine Transform in row wise (DCT two)
* DCT_II_Row_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_II_Row_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_II_Row_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
template <unsigned int TILE_DIM > __global__ void DCTII_Row__InverseKernelx(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = __cosf(((2 * Col + 1) / (2.0 * numAColumns))*PI_d*(threadIdx.y + k*TILE_DIM))*sqrtf(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1)))*sqrtf(2.0 / numAColumns); }
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTInverseRowTwoS(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
hipError_t error;
int devID = 0;
// get number of SMs on this GPU
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTII_Row__InverseKernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTII_Row__InverseKernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| cafeb0d40c6de453fcf334643f8f3391ba652086.cu | /*
* Inverse Discrete Cosine Transform in row wise (DCT two)
* DCT_II_Row_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_II_Row_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_II_Row_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
template <unsigned int TILE_DIM > __global__ void DCTII_Row__InverseKernelx(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = __cosf(((2 * Col + 1) / (2.0 * numAColumns))*PI_d*(threadIdx.y + k*TILE_DIM))*sqrtf(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1)))*sqrtf(2.0 / numAColumns); }
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTInverseRowTwoS(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTII_Row__InverseKernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTII_Row__InverseKernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
2967a70b141212c79a52954aa6bd26af9ab88f33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 17.05.2018
// @author [email protected]
//
#include <ops/declarable/helpers/percentile.h>
#include <NDArrayFactory.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/DebugHelper.h>
#include "ResultSet.h"
namespace nd4j {
namespace ops {
namespace helpers {
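    // percentileKernel: each block walks over TADs in a grid-stride loop, sorts
    // one TAD in place with an odd-even transposition sort (adjacent pairs are
    // swapped, the pair offset alternating each pass), then thread 0 writes the
    // element found at `position` into the output.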
template <typename X>
static _CUDA_G void percentileKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, const Nd4jLong numTads, const Nd4jLong tadLength, void *vz, Nd4jLong *zShapeInfo, const Nd4jLong zLength, const Nd4jLong position) {
for (int t = blockIdx.x; t < numTads; t += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[t];
auto z = reinterpret_cast<X*>(vz);
// sort tad
if (tadLength > 1) {
for (int m = 0; m < tadLength; m++) {
if (m % 2 == 0) {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo, tadLength);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo, tadLength);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo, tadLength);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo, tadLength);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
}
__syncthreads();
}
}
// saving final value
if (threadIdx.x == 0)
z[shape::getIndexOffset(t, zShapeInfo, zLength)] = x[shape::getIndexOffset(position, xTadShapeInfo, tadLength)];
__syncthreads();
}
}
template <typename T>
static void _percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axis, const float q, const int interpolation) {
const int inputRank = input.rankOf();
if(axis.empty())
for(int i=0; i<inputRank; ++i)
axis.push_back(i);
else
shape::checkDimensions(inputRank, axis);
auto tempArray = input.dup(input.ordering());
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(tempArray->getShapeInfo(), axis);
auto tadLength = shape::length(packX.primaryShapeInfo());
const float fraction = 1.f - q / 100.;
Nd4jLong position = 0;
switch(interpolation) {
case 0: // lower
position = static_cast<Nd4jLong>(math::nd4j_ceil<float,T>((tadLength - 1) * fraction));
break;
case 1: // higher
position = static_cast<Nd4jLong>(math::nd4j_floor<float,T>((tadLength - 1) * fraction));
break;
case 2: // nearest
position = static_cast<Nd4jLong>(math::nd4j_round<float,T>((tadLength - 1) * fraction));
break;
}
position = tadLength - position - 1;
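    // `fraction` counts from the top of the sorted TAD (1 - q/100), which is why
    // the ceil/floor choices above look mirrored; the flip on the previous line
    // converts the index back into a rank from the smallest element.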
hipLaunchKernelGGL(( percentileKernel<T>), dim3(256), dim3(512), 1024, *context->getCudaStream(), tempArray->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), packX.numberOfTads(), tadLength, output.specialBuffer(), output.specialShapeInfo(), output.lengthOf(), position);
nd4j::DebugHelper::checkErrorCode(context->getCudaStream(), "percentile");
delete tempArray;
}
void percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), _percentile, (context, input, output, axises, q, interpolation), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
BUILD_SINGLE_TEMPLATE(template void _percentile, (nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation), LIBND4J_TYPES);
}
}
} | 2967a70b141212c79a52954aa6bd26af9ab88f33.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 17.05.2018
// @author [email protected]
//
#include <ops/declarable/helpers/percentile.h>
#include <NDArrayFactory.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/DebugHelper.h>
#include "ResultSet.h"
namespace nd4j {
namespace ops {
namespace helpers {
template <typename X>
static _CUDA_G void percentileKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, const Nd4jLong numTads, const Nd4jLong tadLength, void *vz, Nd4jLong *zShapeInfo, const Nd4jLong zLength, const Nd4jLong position) {
for (int t = blockIdx.x; t < numTads; t += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[t];
auto z = reinterpret_cast<X*>(vz);
// sort tad
if (tadLength > 1) {
for (int m = 0; m < tadLength; m++) {
if (m % 2 == 0) {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo, tadLength);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo, tadLength);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo, tadLength);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo, tadLength);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
}
__syncthreads();
}
}
// saving final value
if (threadIdx.x == 0)
z[shape::getIndexOffset(t, zShapeInfo, zLength)] = x[shape::getIndexOffset(position, xTadShapeInfo, tadLength)];
__syncthreads();
}
}
template <typename T>
static void _percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axis, const float q, const int interpolation) {
const int inputRank = input.rankOf();
if(axis.empty())
for(int i=0; i<inputRank; ++i)
axis.push_back(i);
else
shape::checkDimensions(inputRank, axis);
auto tempArray = input.dup(input.ordering());
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(tempArray->getShapeInfo(), axis);
auto tadLength = shape::length(packX.primaryShapeInfo());
const float fraction = 1.f - q / 100.;
Nd4jLong position = 0;
switch(interpolation) {
case 0: // lower
position = static_cast<Nd4jLong>(math::nd4j_ceil<float,T>((tadLength - 1) * fraction));
break;
case 1: // higher
position = static_cast<Nd4jLong>(math::nd4j_floor<float,T>((tadLength - 1) * fraction));
break;
case 2: // nearest
position = static_cast<Nd4jLong>(math::nd4j_round<float,T>((tadLength - 1) * fraction));
break;
}
position = tadLength - position - 1;
percentileKernel<T><<<256, 512, 1024, *context->getCudaStream()>>>(tempArray->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), packX.numberOfTads(), tadLength, output.specialBuffer(), output.specialShapeInfo(), output.lengthOf(), position);
nd4j::DebugHelper::checkErrorCode(context->getCudaStream(), "percentile");
delete tempArray;
}
void percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), _percentile, (context, input, output, axises, q, interpolation), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
BUILD_SINGLE_TEMPLATE(template void _percentile, (nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation), LIBND4J_TYPES);
}
}
} |
adbcd751a83953f49b2eeab66c044751d555015a.hip | // !!! This is a file automatically generated by hipify!!!
#include "heat3d.h"
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(hipError_t error, const char *file, const int line)
{
#if defined(DEBUG) || defined(_DEBUG)
if (error != hipSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, hipGetErrorString(hipGetLastError()));
exit(-1);
}
#endif
return;
}
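// Typical usage: wrap every runtime call, e.g. checkCuda(hipMalloc(&ptr, bytes));
// the file and line of the failing call are then reported in debug builds.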
/****************************************************/
/* Function that scans for devices on a single node */
/****************************************************/
extern "C" int DeviceScan()
{
int numberOfDevices;
checkCuda(hipGetDeviceCount(&numberOfDevices));
return numberOfDevices;
}
/*******************************************************************/
/* Function that checks if multiple GPUs are available on the node */
/*******************************************************************/
extern "C" void MPIDeviceCheck(int rank, int numberOfProcesses, int numberOfDevices)
{
if (numberOfDevices < 2)
{
printf("Less than two devices were found.\n");
printf("Exiting...\n");
Finalize();
exit(-1);
}
if (numberOfProcesses > numberOfDevices)
{
printf("Number of processors exceeds the number of GPUs\n");
printf("Exiting...\n");
Finalize();
exit(-1);
}
}
/*****************************************************************/
/* Function that assigns a single device to a single MPI process */
/*****************************************************************/
extern "C" void AssignDevices(int rank)
{
int numberOfDevices = 0;
checkCuda(hipGetDeviceCount(&numberOfDevices));
checkCuda(hipSetDevice(rank % numberOfDevices));
printf("Process %d -> GPU%d\n", rank, rank % numberOfDevices);
}
/************************************************************************/
/* Function that checks if ECC is turned on for the devices on the node */
/************************************************************************/
extern "C" void ECCCheck(int rank)
{
hipDeviceProp_t properties;
checkCuda(hipGetDeviceProperties(&properties, rank));
if (properties.ECCEnabled == 1)
{
printf("ECC is turned on for device #%d\n", rank);
}
else
{
printf("ECC is turned off for device #%d\n", rank);
}
}
/*************************************************************/
/* Computes the number of thread blocks covering n+2 points  */
/*************************************************************/
extern "C" int getBlock(int n, int block)
{
return (n+2)/block + ((n+2)%block == 0?0:1);
}
| adbcd751a83953f49b2eeab66c044751d555015a.cu | #include "heat3d.h"
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(cudaError_t error, const char *file, const int line)
{
#if defined(DEBUG) || defined(_DEBUG)
if (error != cudaSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError()));
exit(-1);
}
#endif
return;
}
/****************************************************/
/* Function that scans for devices on a single node */
/****************************************************/
extern "C" int DeviceScan()
{
int numberOfDevices;
checkCuda(cudaGetDeviceCount(&numberOfDevices));
return numberOfDevices;
}
/*******************************************************************/
/* Function that checks if multiple GPUs are available on the node */
/*******************************************************************/
extern "C" void MPIDeviceCheck(int rank, int numberOfProcesses, int numberOfDevices)
{
if (numberOfDevices < 2)
{
printf("Less than two devices were found.\n");
printf("Exiting...\n");
Finalize();
exit(-1);
}
if (numberOfProcesses > numberOfDevices)
{
printf("Number of processors exceeds the number of GPUs\n");
printf("Exiting...\n");
Finalize();
exit(-1);
}
}
/*****************************************************************/
/* Function that assigns a single device to a single MPI process */
/*****************************************************************/
extern "C" void AssignDevices(int rank)
{
int numberOfDevices = 0;
checkCuda(cudaGetDeviceCount(&numberOfDevices));
checkCuda(cudaSetDevice(rank % numberOfDevices));
printf("Process %d -> GPU%d\n", rank, rank % numberOfDevices);
}
/************************************************************************/
/* Function that checks if ECC is turned on for the devices on the node */
/************************************************************************/
extern "C" void ECCCheck(int rank)
{
cudaDeviceProp properties;
checkCuda(cudaGetDeviceProperties(&properties, rank));
if (properties.ECCEnabled == 1)
{
printf("ECC is turned on for device #%d\n", rank);
}
else
{
printf("ECC is turned off for device #%d\n", rank);
}
}
/*************************************************************/
/* Computes the number of thread blocks covering n+2 points  */
/*************************************************************/
extern "C" int getBlock(int n, int block)
{
return (n+2)/block + ((n+2)%block == 0?0:1);
}
|
1a23ca78617a1e9e3a4a20c2709c11b7e0d2e90c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "additionally.h"
#include "gpu.h"
extern int gpu_index;
#define BLOCK 512
void pull_batchnorm_layer(layer l) {} // not required now
void push_batchnorm_layer(layer l) {} // not required now
void pull_local_layer(local_layer l) {} // not required now
void push_local_layer(local_layer l) {} // not required now
void pull_connected_layer(local_layer l) {} // not required now
void push_connected_layer(local_layer l) {} // not required now
void check_error(hipError_t status)
{
//hipDeviceSynchronize();
hipError_t status2 = hipGetLastError();
if (status != hipSuccess)
{
const char *s = hipGetErrorString(status);
char buffer[256];
printf("CUDA Error: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error: %s", s);
error(buffer);
}
if (status2 != hipSuccess)
{
const char *s = hipGetErrorString(status2);
char buffer[256];
printf("CUDA Error Prev: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error Prev: %s", s);
error(buffer);
}
}
void cuda_set_device(int n)
{
gpu_index = n;
hipError_t status = hipSetDevice(n);
check_error(status);
}
int cuda_get_device()
{
int n = 0;
hipError_t status = hipGetDevice(&n);
check_error(status);
return n;
}
#ifdef CUDNN
cudnnHandle_t cudnn_handle()
{
static int init[16] = { 0 };
static cudnnHandle_t handle[16];
int i = cuda_get_device();
if (!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
}
return handle[i];
}
#endif
float *cuda_make_array(float *x, size_t n)
{
float *x_gpu;
size_t size = sizeof(float)*n;
hipError_t status = hipMalloc((void **)&x_gpu, size);
check_error(status);
if (x) {
status = hipMemcpy(x_gpu, x, size, hipMemcpyHostToDevice);
check_error(status);
}
if (!x_gpu) error("Cuda malloc failed\n");
return x_gpu;
}
int *cuda_make_int_array(size_t n)
{
int *x_gpu;
size_t size = sizeof(int)*n;
hipError_t status = hipMalloc((void **)&x_gpu, size);
check_error(status);
return x_gpu;
}
void cuda_free(float *x_gpu)
{
hipError_t status = hipFree(x_gpu);
check_error(status);
}
void cuda_push_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
hipError_t status = hipMemcpy(x_gpu, x, size, hipMemcpyHostToDevice);
check_error(status);
}
void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
hipError_t status = hipMemcpy(x, x_gpu, size, hipMemcpyDeviceToHost);
check_error(status);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
dim3 cuda_gridsize(size_t n) {
size_t k = (n - 1) / BLOCK + 1;
size_t x = k;
size_t y = 1;
if (x > 65535) {
x = ceil(sqrtf(k));
y = (n - 1) / (x*BLOCK) + 1;
}
dim3 d;
d.x = x;
d.y = y;
d.z = 1;
//printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
return d;
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// -------------------- CUDA functions -------------------
// add BIAS
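// Grid layout: blockIdx.z = batch item, blockIdx.y = filter, and the x dimension
// tiles the spatial positions, so each thread adds one bias value to one output cell.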
__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] += biases[filter];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size);
check_error(hipPeekAtLastError());
}
// normalization
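// Inference-time batch-norm step: each element is shifted by its channel mean and
// divided by sqrtf(variance[f]) + 1e-6 (the epsilon is added after the square root,
// exactly as written below).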
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index / spatial) % filters;
x[index] = (x[index] - mean[f]) / (sqrtf(variance[f]) + .000001f);
}
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
normalize_kernel << <cuda_gridsize(N), BLOCK >> >(N, x, mean, variance, batch, filters, spatial);
check_error(hipPeekAtLastError());
}
// fill array
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) X[i*INCX] = ALPHA;
}
void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
fill_kernel << <cuda_gridsize(N), BLOCK >> >(N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
// scale BIAS
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
scale_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size);
check_error(hipPeekAtLastError());
}
// max-pool layer
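// Each thread produces one pooled output: the flat index is decomposed into
// (batch, channel, row, col), the size x size window is scanned with -INFINITY
// standing in for padded positions, and the winning input index is stored in
// `indexes` alongside the maximum value.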
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + 2 * pad) / stride;
int w = (in_w + 2 * pad) / stride;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> >(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
// flatten
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_s = i%spatial;
i = i / spatial;
int in_c = i%layers;
i = i / layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, spatial, layers, batch, forward, out);
check_error(hipPeekAtLastError());
}
// activations
__device__ float lhtan_activate_kernel(float x)
{
if (x < 0) return .001*x;
if (x > 1) return .001*(x - 1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if (x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x) { return x; }
__device__ float logistic_activate_kernel(float x) { return 1. / (1. + exp(-x)); }
__device__ float loggy_activate_kernel(float x) { return 2. / (1. + exp(-x)) - 1; }
__device__ float relu_activate_kernel(float x) { return x*(x>0); }
__device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(exp(x) - 1); }
__device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01*x; }
__device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1*x; }
__device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1*x; }
__device__ float tanh_activate_kernel(float x) { return (2 / (1 + exp(-2 * x)) - 1); }
__device__ float plse_activate_kernel(float x)
{
if (x < -4) return .01 * (x + 4);
if (x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n % 2 == 0) return floor(x / 2.);
else return (x - n) + floor(x / 2.);
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) x[i] = activate_kernel(x[i], a);
}
void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
activate_array_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, a);
check_error(hipPeekAtLastError());
}
// softmax layer
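// softmax_device subtracts the largest input (found in the first loop) before
// exponentiating, so expf() cannot overflow for large activations.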
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for (i = 0; i < n; ++i) {
float val = input[i]; // keep the running maximum in float so values are not truncated
largest = (val>largest) ? val : largest;
}
for (i = 0; i < n; ++i) {
float e = expf(input[i] / temp - largest / temp);
sum += e;
output[i] = e;
}
for (i = 0; i < n; ++i) {
output[i] /= sum;
}
}
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (b >= batch) return;
softmax_device(n, input + b*offset, temp, output + b*offset);
}
void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
int inputs = n;
int batch = groups;
softmax_kernel << <cuda_gridsize(batch), BLOCK >> >(inputs, offset, batch, input, temp, output);
check_error(hipPeekAtLastError());
}
// reorg layer
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_index = i;
int in_w = i%w;
i = i / w;
int in_h = i%h;
i = i / h;
int in_c = i%c;
i = i / c;
int b = i%batch;
int out_c = c / (stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
out[in_index] = x[out_index];
}
void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
reorg_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, w, h, c, batch, stride, forward, out);
check_error(hipPeekAtLastError());
}
// upsample layer
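// Nearest-neighbour upsampling by `stride`: forward adds scale * input into the
// enlarged output; backward (forward == 0) pushes the gradient back into x with
// atomicAdd.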
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int out_index = i;
int out_w = i % (w*stride);
i = i / (w*stride);
int out_h = i % (h*stride);
i = i / (h*stride);
int out_c = i%c;
i = i / c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if (forward) out[out_index] += scale * x[in_index];
else atomicAdd(x + in_index, scale * out[out_index]);
}
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t size = w*h*c*batch*stride*stride;
upsample_kernel << <cuda_gridsize(size), BLOCK >> >(size, in, w, h, c, batch, stride, forward, scale, out);
check_error(hipPeekAtLastError());
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel << <cuda_gridsize(N), BLOCK>> >(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
// shortcut layer
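// Residual/shortcut add between tensors of possibly different width, height and
// depth: only the overlapping min(w, h, c) region is accumulated, with the
// stride/sample factors mapping indices between the two layouts.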
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int size = batch * minw * minh * minc;
shortcut_kernel << <cuda_gridsize(size), BLOCK>> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
check_error(hipPeekAtLastError());
}
// ----------- Quantization --------------
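// float32 <-> int8 helpers: values are scaled by `multipler` and, in the clamped
// variant, limited to +/- max_val so they fit the signed 7-bit payload mentioned
// in the kernel comments below.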
__host__ __device__ int max_abs(int src, int max_val) {
if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
return src;
}
__global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val); // 7-bit (1-bit sign)
}
void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) {
cuda_f32_to_int8 << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler, max_val);
}
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_int8[idx] = input_f32[idx] * multipler; // 7-bit (1-bit sign)
}
void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) {
cuda_f32_to_int8_nomax << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler);
}
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = input_int8[idx] * multipler; // 7-bit (1-bit sign)
}
void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) {
cuda_int8_to_f32 << < size / BLOCK + 1, BLOCK >> >(input_int8, size, output_f32, multipler);
}
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) input_output[idx] = input_output[idx] * multipler; // 7-bit (1-bit sign)
}
void cuda_do_multiply_f32(float *input_output, size_t size, float multipler) {
cuda_multiply_f32 << < size / BLOCK + 1, BLOCK >> >(input_output, size, multipler);
}
// --------------------------------
// ------------- XNOR -------------
// --------------------------------
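// XNOR-net path: each filter's weights are binarized to +/- mean(|w|), activations
// to their sign bit, both are packed into bit arrays, and the convolution GEMM
// further down is carried out with XNOR + popcount rescaled by mean_arr.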
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for (i = 0; i < size; ++i) {
mean += fabs(weights[f*size + i]);
}
mean = mean / size;
for (i = 0; i < size; ++i) {
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary);
check_error(hipPeekAtLastError());
}
// --------------------------------
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary);
check_error(hipPeekAtLastError());
}
// --------------------------------
void swap_binary(convolutional_layer *l)
{
float *swap = l->weights;
l->weights = l->binary_weights;
l->binary_weights = swap;
#ifdef GPU
swap = l->weights_gpu;
l->weights_gpu = l->binary_weights_gpu;
l->binary_weights_gpu = swap;
#endif
}
// --------------------------------
#define WARP_SIZE 32
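// im2col variant that pads every output row to `bit_align` columns, so the later
// bit-packing and binary GEMM can address rows at a fixed bit stride.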
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
//data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
//if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask;
//data_col_ptr_32 += bit_align / 32;
//data_col_ptr += height_col * width_col;
data_col_ptr += bit_align;
}
}
}
}
void im2col_align_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col, int bit_align)
{
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_align_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK,
BLOCK, 0, 0>> >(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col, bit_align);
}
// --------------------------------
// binary im2col - stride=1
__global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize, const int channels,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
__shared__ float tmp_s[1];
__shared__ ulonglong4 tmp256_s[1];
//#define SHRED_VALS ((BLOCK / 169) * )
//__shared__ float dst_s[1024];
//__shared__ float dst_s[1024];
//__shared__ uint32_t bit_s[32];
//__shared__ uint8_t bit_s[128];
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (; index < n; index += blockDim.x*gridDim.x)
{
int c_index = index;
int channel_in = c_index % channels;
//int h_out = index % height_col;
//int c_index = index / height_col;
//int channel_in = c_index % channels;
int channel_out = channel_in * ksize * ksize;
int j_index = c_index / channels;
int j = j_index % ksize;
int i = j_index / ksize;
int pre_out_index = (channel_out + i*ksize + j) * bit_align;
int j_pad = (j - pad);
int i_pad = (i - pad);
for (int wh_index = 0; wh_index < (height_col*width_col); wh_index += 32)
//for (int h_out = 0; h_out < height_col; ++h_out)
{
// the end of padding
//if(0)
//for (int w_out = 0; w_out < (width_col); w_out += 32)
{
const int w_out = wh_index % width_col;
const int h_out = wh_index / width_col;
const int w = w_out + j_pad;
const int h = h_out + i_pad;
int pre_in_index = channel_in * height * width;
int pre_in_wh_index = h * width + w;
int send_wh_index = wh_index;
if (i >= ksize) send_wh_index = height_col*width_col;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t)
{
const int lane_id = threadIdx.x % WARP_SIZE;
const int cur_wh_index = __shfl(send_wh_index, t) + lane_id;
if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize)
{
const int cur_pre_out_index = __shfl(pre_out_index, t);
const int cur_pre_in_index = __shfl(pre_in_index, t);
const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id;
int w = cur_pre_in_wh_index % width;
int h = cur_pre_in_wh_index / width;
int in_index = cur_pre_in_index + cur_pre_in_wh_index;
int out_index = cur_pre_out_index + cur_wh_index;
float val = (w >= 0 && w < width && h >= 0 && h < height) ?
data_im[in_index] : float();
//data_col[out_index] = val;
//tmp_s[0] = val;
uint32_t bit_mask = __ballot(val > 0);
if (lane_id == 0) {
uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]);
uint32_t *bit32_ptr = (uint32_t *)bit8_ptr;
*bit32_ptr = bit_mask;
}
}
}
}// w_out
}
}
}
void im2col_align_bin_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col, int bit_align) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
//int num_kernels = channels * height_col * width_col * ksize * ksize;
//int num_kernels = channels * ksize * ksize * height_col;
int num_kernels = channels * ksize * ksize;
int num_blocks = num_kernels / BLOCK + 1;
//im2col_align_bin_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK,
im2col_align_bin_gpu_kernel << <num_blocks,
BLOCK, 0, 0 >> >(
num_kernels, im, height, width, ksize, channels, pad,
stride, height_col,
width_col, data_col, bit_align);
}
// --------------------------------
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
//const int size_aligned = size + (WARP_SIZE - size % WARP_SIZE);
int index = blockIdx.x*blockDim.x + threadIdx.x;
float src_val;
//for (; index < size_aligned; index += blockDim.x*gridDim.x)
{
//src_val = src[index];
if (index < size) src_val = src[index];
else src_val = 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
unsigned int bit_mask = __ballot(src_val > 0);
if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask;
}
}
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
const int num_blocks = size / BLOCK + 1;
float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size);
}
// --------------------------------
__device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) {
size_t dst_i = index / 8;
int dst_shift = index % 8;
dst[dst_i] &= ~(1 << dst_shift);
}
__device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) {
size_t dst_i = index / 8;
int dst_shift = index % 8;
dst[dst_i] |= 1 << dst_shift;
//dst[dst_i] |= 1 << (8 - dst_shift);
}
__device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
size_t src_i = index / 8;
int src_shift = index % 8;
unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
//unsigned char val = (src[src_i] & (1 << (8 - src_shift))) > 0;
return val;
}
// Intel CPUs and NVIDIA CUDA GPUs are little endian
__device__ __host__ unsigned char reverse_byte(unsigned char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
__device__ __host__ unsigned char reverse_byte_2(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
__device__ __host__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
{
unsigned x, y, t;
// Load the array and pack it into x and y.
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
}
__global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (i = 0; i < n; i += 8)
{
i = (index * 8) % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = ((index * 8) / n) * 8;
if (j < m - 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]);
}
else if (j < m) {
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
else remove_bit(B, j*ldb + i);
}
}
}
}
}
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
size_t size = n*m / 64 + 1;
const int num_blocks = size / BLOCK + 1;
transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(A, B, n, m, lda, ldb, block_size);
}
// --------------------------------
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) src[index] = 0;
}
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size)
{
const int num_blocks = size / BLOCK + 1;
fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, val, size);
}
// --------------------------------
//typedef unsigned long long int uint64_t;
//typedef unsigned int uint32_t;
//typedef unsigned char uint8_t;
//typedef char int8_t;
__device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) {
return (src > 0) ? 0xFFFFFFFFFFFFFFFF : 0;
}
__device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) {
return ~(a^b) & 0b1;
}
__device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
return ~(a^b);
}
__device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
return ~(a^b);
}
__device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) {
uint4 res;
res.w = ~(a.w^b.w);
res.x = ~(a.x^b.x);
res.y = ~(a.y^b.y);
res.z = ~(a.z^b.z);
return res;
}
__device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) {
ulonglong4 res;
res.w = ~(a.w^b.w);
res.x = ~(a.x^b.x);
res.y = ~(a.y^b.y);
res.z = ~(a.z^b.z);
return res;
}
/*
// A (weights) in the shared_memory
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i;
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
//if (i_cur < M && (index % N == 0 || threadIdx.x == 0)) {
//for (int k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//*((uint64_t *)(A_s + (local_i*lda + k) / 8)) = *((uint64_t *)(A + (i_cur*lda + k) / 8)); // weights
// }
//}
__syncthreads();
int i, j, k, h;
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
if (i < M) // l.n - filters [16 - 55 - 1024]
{
float mean_val = mean_arr[i];
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
#include <cstdio>
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
size_t size = M*N;
const int num_blocks = size / BLOCK + 1;
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr);
}
*/
// --------------------------------
__inline__ __device__
int warpAllReduceSum(int val) {
for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
val += __shfl_xor(val, mask);
return val;
}
// Coalesced memory access
// A (weights) in the shared_memory - GOOD
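// Binary GEMM: the packed weight rows used by this block are staged in shared
// memory, XNOR + popcount accumulates matching bits in 2048-, 1024- and 256-bit
// chunks, and the bit count is mapped back to floats as (2*count - K) * mean + bias.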
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint8_t A_s[6144 * 8 / 4];
//__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i;
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
__syncthreads();
int i, j, k, h;
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
//if (i < M) // l.n - filters [16 - 55 - 1024]
{
int count = 0;
k = 0;
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights
uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
//ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights
ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) *mean_val + bias_val;
}
}
}
}
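// Illustrative note (not part of the original source): the C[i*ldc + j] update in
// the kernel above uses the XNOR/popcount identity for binarized dot products.
// If a and b are +/-1 vectors stored as bits (1 -> +1, 0 -> -1), then
//   dot(a, b) = 2 * popcount(xnor(bits(a), bits(b))) - K.
// Worked example with K = 4, a = {+1,-1,+1,+1} -> bits 1,0,1,1 and
// b = {+1,+1,-1,+1} -> bits 1,1,0,1: xnor = 1,0,0,1, popcount = 2, so
// dot = 2*2 - 4 = 0, matching (+1)(+1) + (-1)(+1) + (+1)(-1) + (+1)(+1) = 0.
// The result is then rescaled by mean_val (presumably the per-filter mean of
// |weights| produced by binarize_weights_gpu) and offset by bias_val; the f1
// correction subtracts the popcount contributed by the presumably zero-padded
// bits that round K up to a multiple of bit_step (zero XNOR zero is 1).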
/*
// Coalescing
// B (input) in the shared_memory - GOOD
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint8_t B_s[4096*8]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint64_t B_s[4096]; // 32 KB // [ldb x N`] // max = 262 144 bits
int start_j = blockIdx.x*blockDim.x / M;
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / M + 1;
size_t shared_size = ldb * (end_j - start_j);
int j_cur = index / M;
int local_j = j_cur - start_j;
for (int k = threadIdx.x * 256; k < shared_size; k += blockDim.x * 256) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((ulonglong4 *)(B_s + k / 8)) = *((ulonglong4 *)(B + x / 8));
}
__syncthreads();
int i, j, k;
i = index % M; // l.n - filters [16 - 55 - 1024]
{
j = index / M; // out_h*out_w - one channel output size [169 - 173056]
if (j < N)
{
int count = 0;
k = 0;
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
{
uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + B_i)); // input
c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
{
uint32_t a_bit32 = *((uint32_t *)(A + A_i)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + B_i)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
//ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 b_bit256 = *((ulonglong4 *)(B_s + (local_j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) * mean_val + bias_val;
}
}
}
}
*/
// Coalesced memory access - GOOD
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias)
{
size_t size = M*N;
const int num_blocks = size / BLOCK + 1;
/*
printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n",
size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024);
printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512);
*/
//printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda);
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr, bias);
}
// --------------------------------
// --------------------------------
// --------------------------------
// sequentially - B (input) in the shared_memory - BAD
// --------------------------------
__global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
//__shared__ float mean_shared[32];
//__shared__ uint32_t B_s[8192]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint32_t B_s[4096]; // 16 KB // [ldb x N`] // max = 131 072 bits
__shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits
const int K_items = WARP_SIZE;
int start_j = blockIdx.x*blockDim.x / (K_items * M);
{
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1;
if (end_j > N) end_j = N;
size_t shared_size = ldb * (end_j - start_j);
if (shared_size != 0) {
//if(threadIdx.x == 0) printf(" start_j = %d, end_j = %d, shared_size = %d \n", start_j, end_j, shared_size);
int k;
for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8));
}
}
}
__syncthreads();
int index = blockIdx.x*blockDim.x + threadIdx.x;
{
int i; // l.n
int j; // out_h*out_w
int k; // l.size * l.size * l.c
const int index2 = index / K_items;
i = index2 % M; // max M
j = index2 / M; // max N
int local_j = j - start_j;
//if (i <= 1 && j <= 1 ) printf(" k = %d, K = %d, K_items = %d, i = %d, j = %d, lda = %d, ldb = %d, ldc = %d \n",
// k, K, K_items, i, j, lda, ldb, ldc);
{ // l.n - filters [16 - 55 - 1024]
// further improvements: for (l.n == 1024) iterate several (j)
if (j < N)
{ // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
const int bit_step = 32;
for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE)
{ // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + (j*ldb + k) / 8)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
count += __popc(c_bit32);
}
for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2)
count += __shfl_down(count, offset);
if (threadIdx.x % WARP_SIZE == 0) {
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1;
float mean_val = mean_arr[i];
C[i*ldc + j] = (2 * count - K) * mean_val;
//B_s[threadIdx.x / WARP_SIZE] = (2 * count - K) * mean_val;
}
}
}
}
}
// sequentially - BAD
void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
//size_t size = M*N;
size_t size = M*N * 32;
const int num_blocks = size / BLOCK + 1;
//printf(" K = %d \n", K);
/*
printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n",
size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024);
printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512);
*/
//printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda);
gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr);
}
// --------------------------------
| 1a23ca78617a1e9e3a4a20c2709c11b7e0d2e90c.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "additionally.h"
#include "gpu.h"
extern int gpu_index;
#define BLOCK 512
void pull_batchnorm_layer(layer l) {} // not required now
void push_batchnorm_layer(layer l) {} // not required now
void pull_local_layer(local_layer l) {} // not required now
void push_local_layer(local_layer l) {} // not required now
void pull_connected_layer(local_layer l) {} // not required now
void push_connected_layer(local_layer l) {} // not required now
void check_error(cudaError_t status)
{
//cudaDeviceSynchronize();
cudaError_t status2 = cudaGetLastError();
if (status != cudaSuccess)
{
const char *s = cudaGetErrorString(status);
char buffer[256];
printf("CUDA Error: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error: %s", s);
error(buffer);
}
if (status2 != cudaSuccess)
{
        const char *s = cudaGetErrorString(status2);
char buffer[256];
printf("CUDA Error Prev: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error Prev: %s", s);
error(buffer);
}
}
void cuda_set_device(int n)
{
gpu_index = n;
cudaError_t status = cudaSetDevice(n);
check_error(status);
}
int cuda_get_device()
{
int n = 0;
cudaError_t status = cudaGetDevice(&n);
check_error(status);
return n;
}
#ifdef CUDNN
cudnnHandle_t cudnn_handle()
{
static int init[16] = { 0 };
static cudnnHandle_t handle[16];
int i = cuda_get_device();
if (!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
}
return handle[i];
}
#endif
float *cuda_make_array(float *x, size_t n)
{
float *x_gpu;
size_t size = sizeof(float)*n;
cudaError_t status = cudaMalloc((void **)&x_gpu, size);
check_error(status);
if (x) {
status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
check_error(status);
}
if (!x_gpu) error("Cuda malloc failed\n");
return x_gpu;
}
int *cuda_make_int_array(size_t n)
{
int *x_gpu;
size_t size = sizeof(int)*n;
cudaError_t status = cudaMalloc((void **)&x_gpu, size);
check_error(status);
return x_gpu;
}
void cuda_free(float *x_gpu)
{
cudaError_t status = cudaFree(x_gpu);
check_error(status);
}
void cuda_push_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
check_error(status);
}
void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
check_error(status);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
dim3 cuda_gridsize(size_t n) {
size_t k = (n - 1) / BLOCK + 1;
size_t x = k;
size_t y = 1;
if (x > 65535) {
x = ceil(sqrtf(k));
y = (n - 1) / (x*BLOCK) + 1;
}
dim3 d;
d.x = x;
d.y = y;
d.z = 1;
//printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
return d;
}
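// Illustrative usage sketch (not part of the original source): cuda_gridsize
// splits a flat element count into an (x, y, 1) grid so that no single grid
// dimension exceeds the 65535 limit of legacy grid sizes while each block keeps
// BLOCK threads. A typical launch and the matching index computation look like:
//
//   size_t n = ...;                              // hypothetical caller code
//   my_kernel<<<cuda_gridsize(n), BLOCK>>>(n, ...);
//
//   // inside the kernel:
//   int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
//   if (i >= n) return;
//
// which is the pattern used by the element-wise kernels in this file.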
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// -------------------- CUDA functions -------------------
// add BIAS
__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] += biases[filter];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
// normalization
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index / spatial) % filters;
x[index] = (x[index] - mean[f]) / (sqrtf(variance[f]) + .000001f);
}
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
normalize_kernel << <cuda_gridsize(N), BLOCK >> >(N, x, mean, variance, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
// fill array
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) X[i*INCX] = ALPHA;
}
void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
fill_kernel << <cuda_gridsize(N), BLOCK >> >(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
// scale BIAS
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
scale_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
// max-pool layer
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + 2 * pad) / stride;
int w = (in_w + 2 * pad) / stride;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> >(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
// flatten
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_s = i%spatial;
i = i / spatial;
int in_c = i%layers;
i = i / layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, spatial, layers, batch, forward, out);
check_error(cudaPeekAtLastError());
}
// activations
__device__ float lhtan_activate_kernel(float x)
{
if (x < 0) return .001*x;
if (x > 1) return .001*(x - 1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if (x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x) { return x; }
__device__ float logistic_activate_kernel(float x) { return 1. / (1. + exp(-x)); }
__device__ float loggy_activate_kernel(float x) { return 2. / (1. + exp(-x)) - 1; }
__device__ float relu_activate_kernel(float x) { return x*(x>0); }
__device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(exp(x) - 1); }
__device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01*x; }
__device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1*x; }
__device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1*x; }
__device__ float tanh_activate_kernel(float x) { return (2 / (1 + exp(-2 * x)) - 1); }
__device__ float plse_activate_kernel(float x)
{
if (x < -4) return .01 * (x + 4);
if (x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n % 2 == 0) return floor(x / 2.);
else return (x - n) + floor(x / 2.);
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) x[i] = activate_kernel(x[i], a);
}
void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
activate_array_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, a);
check_error(cudaPeekAtLastError());
}
// softmax layer
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for (i = 0; i < n; ++i) {
        float val = input[i];
largest = (val>largest) ? val : largest;
}
for (i = 0; i < n; ++i) {
float e = expf(input[i] / temp - largest / temp);
sum += e;
output[i] = e;
}
for (i = 0; i < n; ++i) {
output[i] /= sum;
}
}
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (b >= batch) return;
softmax_device(n, input + b*offset, temp, output + b*offset);
}
void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
int inputs = n;
int batch = groups;
softmax_kernel << <cuda_gridsize(batch), BLOCK >> >(inputs, offset, batch, input, temp, output);
check_error(cudaPeekAtLastError());
}
// reorg layer
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_index = i;
int in_w = i%w;
i = i / w;
int in_h = i%h;
i = i / h;
int in_c = i%c;
i = i / c;
int b = i%batch;
int out_c = c / (stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
out[in_index] = x[out_index];
}
void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
reorg_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, w, h, c, batch, stride, forward, out);
check_error(cudaPeekAtLastError());
}
// upsample layer
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int out_index = i;
int out_w = i % (w*stride);
i = i / (w*stride);
int out_h = i % (h*stride);
i = i / (h*stride);
int out_c = i%c;
i = i / c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if (forward) out[out_index] += scale * x[in_index];
else atomicAdd(x + in_index, scale * out[out_index]);
}
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t size = w*h*c*batch*stride*stride;
upsample_kernel << <cuda_gridsize(size), BLOCK >> >(size, in, w, h, c, batch, stride, forward, scale, out);
check_error(cudaPeekAtLastError());
}
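// Illustrative note (not part of the original source): upsample_kernel serves both
// passes. With forward != 0 it performs nearest-neighbour upsampling by factor
// `stride`, accumulating scale * x[in_index] into out[out_index]; with forward == 0
// it acts as the corresponding backward pass, scattering scale * out[out_index]
// back into x[in_index] via atomicAdd, since stride*stride upsampled cells map
// onto the same input cell.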
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel << <cuda_gridsize(N), BLOCK>> >(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
// shortcut layer
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int size = batch * minw * minh * minc;
shortcut_kernel << <cuda_gridsize(size), BLOCK>> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
check_error(cudaPeekAtLastError());
}
// ----------- Quantization --------------
__host__ __device__ int max_abs(int src, int max_val) {
if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
return src;
}
__global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val); // 7-bit (1-bit sign)
}
void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) {
cuda_f32_to_int8 << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler, max_val);
}
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_int8[idx] = input_f32[idx] * multipler; // 7-bit (1-bit sign)
}
void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) {
cuda_f32_to_int8_nomax << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler);
}
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = input_int8[idx] * multipler; // 7-bit (1-bit sign)
}
void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) {
cuda_int8_to_f32 << < size / BLOCK + 1, BLOCK >> >(input_int8, size, output_f32, multipler);
}
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) input_output[idx] = input_output[idx] * multipler; // 7-bit (1-bit sign)
}
void cuda_do_multiply_f32(float *input_output, size_t size, float multipler) {
cuda_multiply_f32 << < size / BLOCK + 1, BLOCK >> >(input_output, size, multipler);
}
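// Illustrative usage sketch (not part of the original source). The helpers above
// implement simple symmetric linear quantization: float -> int8 multiplies by a
// caller-chosen scale (optionally clamped to +/-max_val by max_abs), and
// int8 -> float multiplies by the inverse scale. The names scale, expected_max_abs,
// x_int8_gpu and y_int8_gpu below are hypothetical placeholders, not symbols
// defined in this file:
//
//   float scale = 127.f / expected_max_abs;                      // assumption: caller picks the scale
//   cuda_convert_f32_to_int8(x_gpu, n, x_int8_gpu, scale, 127);  // quantize activations
//   // ... int8 convolution / GEMM ...
//   cuda_convert_int8_to_f32(y_int8_gpu, n, y_gpu, 1.f / scale); // dequantize the result
//
// cuda_do_multiply_f32 is the in-place float rescale used when no change of
// storage type is needed.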
// --------------------------------
// ------------- XNOR -------------
// --------------------------------
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for (i = 0; i < size; ++i) {
mean += fabs(weights[f*size + i]);
}
mean = mean / size;
for (i = 0; i < size; ++i) {
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary);
check_error(cudaPeekAtLastError());
}
// --------------------------------
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary);
check_error(cudaPeekAtLastError());
}
// --------------------------------
void swap_binary(convolutional_layer *l)
{
float *swap = l->weights;
l->weights = l->binary_weights;
l->binary_weights = swap;
#ifdef GPU
swap = l->weights_gpu;
l->weights_gpu = l->binary_weights_gpu;
l->binary_weights_gpu = swap;
#endif
}
// --------------------------------
#define WARP_SIZE 32
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
//data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
//if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask;
//data_col_ptr_32 += bit_align / 32;
//data_col_ptr += height_col * width_col;
data_col_ptr += bit_align;
}
}
}
}
void im2col_align_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col, int bit_align)
{
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_align_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK,
BLOCK, 0, 0>> >(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col, bit_align);
}
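// Illustrative note (not part of the original source): im2col_align_ongpu differs
// from a standard im2col only in the output layout. Each of the
// channels*ksize*ksize rows of the column matrix is written with a fixed stride
// of bit_align elements instead of height_col*width_col, which presumably pads
// every row so that the binary repacking and XNOR-GEMM kernels below can address
// it on 32-/256-bit boundaries. The row contents (one patch element per output
// pixel, zero outside the padded image) are unchanged.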
// --------------------------------
// binary im2col - stride=1
__global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize, const int channels,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
__shared__ float tmp_s[1];
__shared__ ulonglong4 tmp256_s[1];
//#define SHRED_VALS ((BLOCK / 169) * )
//__shared__ float dst_s[1024];
//__shared__ float dst_s[1024];
//__shared__ uint32_t bit_s[32];
//__shared__ uint8_t bit_s[128];
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (; index < n; index += blockDim.x*gridDim.x)
{
int c_index = index;
int channel_in = c_index % channels;
//int h_out = index % height_col;
//int c_index = index / height_col;
//int channel_in = c_index % channels;
int channel_out = channel_in * ksize * ksize;
int j_index = c_index / channels;
int j = j_index % ksize;
int i = j_index / ksize;
int pre_out_index = (channel_out + i*ksize + j) * bit_align;
int j_pad = (j - pad);
int i_pad = (i - pad);
for (int wh_index = 0; wh_index < (height_col*width_col); wh_index += 32)
//for (int h_out = 0; h_out < height_col; ++h_out)
{
// the end of padding
//if(0)
//for (int w_out = 0; w_out < (width_col); w_out += 32)
{
const int w_out = wh_index % width_col;
const int h_out = wh_index / width_col;
const int w = w_out + j_pad;
const int h = h_out + i_pad;
int pre_in_index = channel_in * height * width;
int pre_in_wh_index = h * width + w;
int send_wh_index = wh_index;
if (i >= ksize) send_wh_index = height_col*width_col;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t)
{
const int lane_id = threadIdx.x % WARP_SIZE;
const int cur_wh_index = __shfl(send_wh_index, t) + lane_id;
if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize)
{
const int cur_pre_out_index = __shfl(pre_out_index, t);
const int cur_pre_in_index = __shfl(pre_in_index, t);
const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id;
int w = cur_pre_in_wh_index % width;
int h = cur_pre_in_wh_index / width;
int in_index = cur_pre_in_index + cur_pre_in_wh_index;
int out_index = cur_pre_out_index + cur_wh_index;
float val = (w >= 0 && w < width && h >= 0 && h < height) ?
data_im[in_index] : float();
//data_col[out_index] = val;
//tmp_s[0] = val;
uint32_t bit_mask = __ballot(val > 0);
if (lane_id == 0) {
uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]);
uint32_t *bit32_ptr = (uint32_t *)bit8_ptr;
*bit32_ptr = bit_mask;
}
}
}
}// w_out
}
}
}
void im2col_align_bin_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col, int bit_align) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
//int num_kernels = channels * height_col * width_col * ksize * ksize;
//int num_kernels = channels * ksize * ksize * height_col;
int num_kernels = channels * ksize * ksize;
int num_blocks = num_kernels / BLOCK + 1;
//im2col_align_bin_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK,
im2col_align_bin_gpu_kernel << <num_blocks,
BLOCK, 0, 0 >> >(
num_kernels, im, height, width, ksize, channels, pad,
stride, height_col,
width_col, data_col, bit_align);
}
// --------------------------------
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
//const int size_aligned = size + (WARP_SIZE - size % WARP_SIZE);
int index = blockIdx.x*blockDim.x + threadIdx.x;
float src_val;
//for (; index < size_aligned; index += blockDim.x*gridDim.x)
{
//src_val = src[index];
if (index < size) src_val = src[index];
else src_val = 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
unsigned int bit_mask = __ballot(src_val > 0);
if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask;
}
}
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
const int num_blocks = size / BLOCK + 1;
float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size);
}
// --------------------------------
__device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) {
size_t dst_i = index / 8;
int dst_shift = index % 8;
dst[dst_i] &= ~(1 << dst_shift);
}
__device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) {
size_t dst_i = index / 8;
int dst_shift = index % 8;
dst[dst_i] |= 1 << dst_shift;
//dst[dst_i] |= 1 << (8 - dst_shift);
}
__device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
size_t src_i = index / 8;
int src_shift = index % 8;
unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
//unsigned char val = (src[src_i] & (1 << (8 - src_shift))) > 0;
return val;
}
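// Illustrative note (not part of the original source): the three helpers above
// treat dst/src as a packed bit array, bit `index` living in byte index/8 at bit
// position index%8. Worked example for index = 10: dst_i = 1, dst_shift = 2, so
// set_bit(dst, 10) does dst[1] |= 0b100, get_bit(src, 10) tests that same bit,
// and remove_bit(dst, 10) clears it with the complementary mask.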
// Intel CPUs and nVidia CUDA GPU are little endian
__device__ __host__ unsigned char reverse_byte(unsigned char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
__device__ __host__ unsigned char reverse_byte_2(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
__device__ __host__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
{
unsigned x, y, t;
// Load the array and pack it into x and y.
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
}
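// Illustrative note (not part of the original source): transpose8rS32_reversed_diagonale
// appears to be the classic transpose8rS32 routine from Hacker's Delight ("Transposing
// a Bit Matrix"): an 8x8 bit tile gathered from rows A[0], A[m], ..., A[7*m] is packed
// into the 32-bit words x and y, transposed in-register with three masked swap steps,
// and written back to B with each byte bit-reversed by reverse_byte(), presumably the
// "reversed diagonale" the name refers to. transpose_bin_gpu_kernel below applies it
// to the packed binary matrices in 8x8 blocks.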
__global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (i = 0; i < n; i += 8)
{
i = (index * 8) % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = ((index * 8) / n) * 8;
if (j < m - 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]);
}
else if (j < m) {
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
else remove_bit(B, j*ldb + i);
}
}
}
}
}
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
size_t size = n*m / 64 + 1;
const int num_blocks = size / BLOCK + 1;
transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(A, B, n, m, lda, ldb, block_size);
}
// --------------------------------
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < size) src[index] = val;
}
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size)
{
const int num_blocks = size / BLOCK + 1;
fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, val, size);
}
// --------------------------------
//typedef unsigned long long int uint64_t;
//typedef unsigned int uint32_t;
//typedef unsigned char uint8_t;
//typedef char int8_t;
__device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) {
return (src > 0) ? 0xFFFFFFFFFFFFFFFF : 0;
}
__device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) {
return ~(a^b) & 0b1;
}
__device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
return ~(a^b);
}
__device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
return ~(a^b);
}
__device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) {
uint4 res;
res.w = ~(a.w^b.w);
res.x = ~(a.x^b.x);
res.y = ~(a.y^b.y);
res.z = ~(a.z^b.z);
return res;
}
__device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) {
ulonglong4 res;
res.w = ~(a.w^b.w);
res.x = ~(a.x^b.x);
res.y = ~(a.y^b.y);
res.z = ~(a.z^b.z);
return res;
}
/*
// A (weights) in the shared_memory
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i;
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
//if (i_cur < M && (index % N == 0 || threadIdx.x == 0)) {
//for (int k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//*((uint64_t *)(A_s + (local_i*lda + k) / 8)) = *((uint64_t *)(A + (i_cur*lda + k) / 8)); // weights
// }
//}
__syncthreads();
int i, j, k, h;
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
if (i < M) // l.n - filters [16 - 55 - 1024]
{
float mean_val = mean_arr[i];
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
#include <cstdio>
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
size_t size = M*N;
const int num_blocks = size / BLOCK + 1;
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr);
}
*/
// --------------------------------
__inline__ __device__
int warpAllReduceSum(int val) {
for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
val += __shfl_xor(val, mask);
return val;
}
// Coalesced memory access
// A (weights) in the shared_memory - GOOD
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint8_t A_s[6144 * 8 / 4];
//__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i;
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
__syncthreads();
int i, j, k, h;
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
//if (i < M) // l.n - filters [16 - 55 - 1024]
{
int count = 0;
k = 0;
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights
uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
//ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights
ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) *mean_val + bias_val;
}
}
}
}
/*
// Coalescing
// B (input) in the shared_memory - GOOD
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint8_t B_s[4096*8]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint64_t B_s[4096]; // 32 KB // [ldb x N`] // max = 262 144 bits
int start_j = blockIdx.x*blockDim.x / M;
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / M + 1;
size_t shared_size = ldb * (end_j - start_j);
int j_cur = index / M;
int local_j = j_cur - start_j;
for (int k = threadIdx.x * 256; k < shared_size; k += blockDim.x * 256) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((ulonglong4 *)(B_s + k / 8)) = *((ulonglong4 *)(B + x / 8));
}
__syncthreads();
int i, j, k;
i = index % M; // l.n - filters [16 - 55 - 1024]
{
j = index / M; // out_h*out_w - one channel output size [169 - 173056]
if (j < N)
{
int count = 0;
k = 0;
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
{
uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + B_i)); // input
c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
{
uint32_t a_bit32 = *((uint32_t *)(A + A_i)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + B_i)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
//ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 b_bit256 = *((ulonglong4 *)(B_s + (local_j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) * mean_val + bias_val;
}
}
}
}
*/
// Coalesced memory access - GOOD
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias)
{
size_t size = M*N;
const int num_blocks = size / BLOCK + 1;
/*
printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n",
size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024);
printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512);
*/
//printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda);
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr, bias);
}
// --------------------------------
// --------------------------------
// --------------------------------
// sequentially - B (input) in the shared_memory - BAD
// --------------------------------
__global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
//__shared__ float mean_shared[32];
//__shared__ uint32_t B_s[8192]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint32_t B_s[4096]; // 16 KB // [ldb x N`] // max = 131 072 bits
__shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits
const int K_items = WARP_SIZE;
int start_j = blockIdx.x*blockDim.x / (K_items * M);
{
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1;
if (end_j > N) end_j = N;
size_t shared_size = ldb * (end_j - start_j);
if (shared_size != 0) {
//if(threadIdx.x == 0) printf(" start_j = %d, end_j = %d, shared_size = %d \n", start_j, end_j, shared_size);
int k;
for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8));
}
}
}
__syncthreads();
int index = blockIdx.x*blockDim.x + threadIdx.x;
{
int i; // l.n
int j; // out_h*out_w
int k; // l.size * l.size * l.c
const int index2 = index / K_items;
i = index2 % M; // max M
j = index2 / M; // max N
int local_j = j - start_j;
//if (i <= 1 && j <= 1 ) printf(" k = %d, K = %d, K_items = %d, i = %d, j = %d, lda = %d, ldb = %d, ldc = %d \n",
// k, K, K_items, i, j, lda, ldb, ldc);
{ // l.n - filters [16 - 55 - 1024]
// further improvements: for (l.n == 1024) iterate several (j)
if (j < N)
{ // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
const int bit_step = 32;
for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE)
{ // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + (j*ldb + k) / 8)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
count += __popc(c_bit32);
}
for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2)
count += __shfl_down(count, offset);
if (threadIdx.x % WARP_SIZE == 0) {
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1;
float mean_val = mean_arr[i];
C[i*ldc + j] = (2 * count - K) * mean_val;
//B_s[threadIdx.x / WARP_SIZE] = (2 * count - K) * mean_val;
}
}
}
}
}
// sequentially - BAD
void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
//size_t size = M*N;
size_t size = M*N * 32;
const int num_blocks = size / BLOCK + 1;
//printf(" K = %d \n", K);
/*
printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n",
size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024);
printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512);
*/
//printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda);
gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr);
}
// --------------------------------
|
a15de511711664ad03ca7369060db16b6b39daa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
int main(int argc, char** argv)
{
hipFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
Graph<OutEdgeWeighted> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.value[i] = 0;
graph.label1[i] = true;
graph.label2[i] = false;
}
graph.value[arguments.sourceNode] = DIST_INFINITY;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(hipMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_label2, graph.label2, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice));
Subgraph<OutEdgeWeighted> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdgeWeighted> subgen(graph);
subgen.generate(graph, subgraph);
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.label1[i] = false;
}
graph.label1[arguments.sourceNode] = true;
gpuErrorcheck(hipMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice));
Partitioner<OutEdgeWeighted> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
hipDeviceSynchronize();
gpuErrorcheck(hipMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdgeWeighted), hipMemcpyHostToDevice));
hipDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
hipLaunchKernelGGL(( mixLabels), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512), 0, 0, subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
uint itr = 0;
do
{
cout << "\t\tIteration " << ++itr << endl;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( sswp_async), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
d_finished,
(itr%2==1) ? graph.d_label1 : graph.d_label2,
(itr%2==1) ? graph.d_label2 : graph.d_label1);
hipDeviceSynchronize();
gpuErrorcheck( hipPeekAtLastError() );
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(hipMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(uint), hipMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
| a15de511711664ad03ca7369060db16b6b39daa0.cu | #include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
int main(int argc, char** argv)
{
cudaFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
Graph<OutEdgeWeighted> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.value[i] = 0;
graph.label1[i] = true;
graph.label2[i] = false;
}
graph.value[arguments.sourceNode] = DIST_INFINITY;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(cudaMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_label2, graph.label2, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice));
Subgraph<OutEdgeWeighted> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdgeWeighted> subgen(graph);
subgen.generate(graph, subgraph);
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.label1[i] = false;
}
graph.label1[arguments.sourceNode] = true;
gpuErrorcheck(cudaMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice));
Partitioner<OutEdgeWeighted> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
cudaDeviceSynchronize();
gpuErrorcheck(cudaMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdgeWeighted), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
mixLabels<<<partitioner.partitionNodeSize[i]/512 + 1 , 512>>>(subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
uint itr = 0;
do
{
cout << "\t\tIteration " << ++itr << endl;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
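// label1/label2 act as ping-pong buffers for the per-vertex active flags:
// their read/write roles swap with the parity of the inner iteration, so no copy is needed between passes.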
sswp_async<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
d_finished,
(itr%2==1) ? graph.d_label1 : graph.d_label2,
(itr%2==1) ? graph.d_label2 : graph.d_label1);
cudaDeviceSynchronize();
gpuErrorcheck( cudaPeekAtLastError() );
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(cudaMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(uint), cudaMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
98d3fa4285494dcfedaa0939e7335da786aefbbe.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/TensorIterator.h>
#include <type_traits>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void remainder_kernel_cuda(TensorIterator& iter) {
if (isIntegralType(iter.dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
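// Integral % truncates toward zero; when the remainder is nonzero and its sign
// differs from the divisor's, add b so the result follows the sign of b (Python-style remainder).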
if (!std::is_unsigned<scalar_t>::value && (r != 0) && ((r < 0) != (b < 0))) {
r += b;
}
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
auto mod = ::fmod(a, b);
if (!std::is_unsigned<scalar_t>::value && (mod != 0) && ((b < 0) != (mod < 0))) {
mod += b;
}
return mod;
});
});
}
}
void fmod_kernel_cuda(TensorIterator& iter) {
// Use the dtype of the first argument to retain BC,
// change to common_dtype for type promotion in the future
// Issue #47779: https://github.com/pytorch/pytorch/issues/47779
if (isIntegralType(iter.dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a % b;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return ::fmod(a, b);
});
});
}
}
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);
REGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda);
}} // namespace at::native
| 98d3fa4285494dcfedaa0939e7335da786aefbbe.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/TensorIterator.h>
#include <type_traits>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void remainder_kernel_cuda(TensorIterator& iter) {
if (isIntegralType(iter.dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
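// Integral % truncates toward zero; when the remainder is nonzero and its sign
// differs from the divisor's, add b so the result follows the sign of b (Python-style remainder).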
if (!std::is_unsigned<scalar_t>::value && (r != 0) && ((r < 0) != (b < 0))) {
r += b;
}
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
auto mod = ::fmod(a, b);
if (!std::is_unsigned<scalar_t>::value && (mod != 0) && ((b < 0) != (mod < 0))) {
mod += b;
}
return mod;
});
});
}
}
void fmod_kernel_cuda(TensorIterator& iter) {
// Use the dtype of the first argument to retain BC,
// change to common_dtype for type promotion in the future
// Issue #47779: https://github.com/pytorch/pytorch/issues/47779
if (isIntegralType(iter.dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a % b;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return ::fmod(a, b);
});
});
}
}
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);
REGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda);
}} // namespace at::native
|
d662b4f0c46633de49cb550a95f5c33a838b2f95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////In the first iteration, L2 tlb does prefetch, while L1 tlb doesn't.
///////////When data size reaches 512 MB, L2 tlb becomes saturated and starts to miss.
///////////Because when changing the data stride, the tlb miss latency does not change, so it is actually not prefetching but the page size is 32MB.
///////////In the second iteration, however, L2 cache seems to never miss.
///////////The 400s, some of the 600s, 900s are appearing randomly.
//typedef unsigned char byte;
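// Builds pointer-chasing chains in A: each visited element stores the index of the next
// element (stride apart, wrapping at mod); a second chain starts at offset 32.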
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
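// One timed step of the pointer chase: read %clock64, issue the dependent 4-byte load of A[j],
// then read %clock64 again after the loaded index is consumed below, exposing the full load latency.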
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(long long int mod2 = 2 * 256 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, mod, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| d662b4f0c46633de49cb550a95f5c33a838b2f95.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////In the first iteration, L2 tlb does prefetch, while L1 tlb doesn't.
///////////When data size reaches 512 MB, L2 tlb becomes saturated and starts to miss.
///////////Because when changing the data stride, the tlb miss latency does not change, so it is actually not prefetching but the page size is 32MB.
///////////In the second iteration, however, L2 cache seems to never miss.
///////////The 400s, some of the 600s, 900s are appearing randomly.
//typedef unsigned char byte;
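// Builds pointer-chasing chains in A: each visited element stores the index of the next
// element (stride apart, wrapping at mod); a second chain starts at offset 32.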
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
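// One timed step of the pointer chase: read %clock64, issue the dependent 4-byte load of A[j],
// then read %clock64 again after the loaded index is consumed below, exposing the full load latency.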
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(long long int mod2 = 2 * 256 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, mod, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
1fc4327e4bb6fef2fffa37f3e6e6866fff1e0a1f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright (c) 2017,2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <sstream>
#include <mpi.h>
#define MPI_CALL(call) \
{ \
int mpi_status = call; \
if (0 != mpi_status) { \
char mpi_error_string[MPI_MAX_ERROR_STRING]; \
int mpi_error_string_length = 0; \
MPI_Error_string(mpi_status, mpi_error_string, &mpi_error_string_length); \
if (NULL != mpi_error_string) \
fprintf(stderr, \
"ERROR: MPI call \"%s\" in line %d of file %s failed " \
"with %s " \
"(%d).\n", \
#call, __LINE__, __FILE__, mpi_error_string, mpi_status); \
else \
fprintf(stderr, \
"ERROR: MPI call \"%s\" in line %d of file %s failed " \
"with %d.\n", \
#call, __LINE__, __FILE__, mpi_status); \
} \
}
#include <hip/hip_runtime.h>
//TODO: Include NVSHMEM headers
#include <nvshmem.h>
#include <nvshmemx.h>
#ifdef HAVE_CUB
#include <hipcub/hipcub.hpp>
#endif // HAVE_CUB
#ifdef USE_NVTX
#include <roctracer/roctx.h>
const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff,
0x0000ffff, 0x00ff0000, 0x00ffffff};
const int num_colors = sizeof(colors) / sizeof(uint32_t);
// roctx does not expose the colored NVTX event-attribute API, so only the range name is recorded here.
#define PUSH_RANGE(name, cid) roctxRangePushA(name);
#define POP_RANGE roctxRangePop();
#else
#define PUSH_RANGE(name, cid)
#define POP_RANGE
#endif
#define CUDA_RT_CALL(call) \
{ \
hipError_t cudaStatus = call; \
if (hipSuccess != cudaStatus) \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \
"with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \
}
#ifdef USE_DOUBLE
typedef double real;
#define MPI_REAL_TYPE MPI_DOUBLE
#else
typedef float real;
#define MPI_REAL_TYPE MPI_FLOAT
#endif
constexpr real tol = 1.0e-8;
const real PI = 2.0 * std::asin(1.0);
__global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a,
const real pi, const int offset, const int nx,
const int my_ny, const int ny) {
for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) {
const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1));
a[iy * nx + 0] = y0;
a[iy * nx + (nx - 1)] = y0;
a_new[iy * nx + 0] = y0;
a_new[iy * nx + (nx - 1)] = y0;
}
}
template <int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a,
real* __restrict__ const l2_norm, const int iy_start,
const int iy_end, const int nx, const bool calculate_norm) {
#ifdef HAVE_CUB
typedef hipcub::BlockReduce<real, BLOCK_DIM_X, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
#endif // HAVE_CUB
int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
real local_l2_norm = 0.0;
if (iy < iy_end && ix < (nx - 1)) {
const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] +
a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]);
a_new[iy * nx + ix] = new_val;
if (calculate_norm) {
real residue = new_val - a[iy * nx + ix];
local_l2_norm += residue * residue;
}
}
if (calculate_norm) {
#ifdef HAVE_CUB
real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm);
#else
atomicAdd(l2_norm, local_l2_norm);
#endif // HAVE_CUB
}
}
void launch_jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a,
real* __restrict__ const l2_norm, const int iy_start, const int iy_end,
const int nx, const bool calculate_norm, hipStream_t stream) {
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 32;
dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
((iy_end - iy_start) + dim_block_y - 1) / dim_block_y, 1);
hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>), dim3(dim_grid), dim3(dim_block_x, dim_block_y, 1), 0, stream,
a_new, a, l2_norm, iy_start, iy_end, nx, calculate_norm);
CUDA_RT_CALL(hipGetLastError());
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h,
const int nccheck, const bool print);
template <typename T>
T get_argval(char** begin, char** end, const std::string& arg, const T default_val) {
T argval = default_val;
char** itr = std::find(begin, end, arg);
if (itr != end && ++itr != end) {
std::istringstream inbuf(*itr);
inbuf >> argval;
}
return argval;
}
bool get_arg(char** begin, char** end, const std::string& arg) {
char** itr = std::find(begin, end, arg);
if (itr != end) {
return true;
}
return false;
}
int main(int argc, char* argv[]) {
MPI_CALL(MPI_Init(&argc, &argv));
int rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
int size;
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size));
int num_devices = 0;
CUDA_RT_CALL(hipGetDeviceCount(&num_devices));
const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000);
const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1);
const int nx = get_argval<int>(argv, argv + argc, "-nx", 16384);
const int ny = get_argval<int>(argv, argv + argc, "-ny", 16384);
const bool csv = get_arg(argv, argv + argc, "-csv");
int local_rank = -1;
{
MPI_Comm local_comm;
MPI_CALL(MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL,
&local_comm));
MPI_CALL(MPI_Comm_rank(local_comm, &local_rank));
MPI_CALL(MPI_Comm_free(&local_comm));
}
CUDA_RT_CALL(hipSetDevice(local_rank%num_devices));
CUDA_RT_CALL(hipFree(0));
//TODO: Initialize NVSHMEM using nvshmemx_init_attr
MPI_Comm mpi_comm = MPI_COMM_WORLD;
nvshmemx_init_attr_t attr;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
assert( size == nvshmem_n_pes() );
assert( rank == nvshmem_my_pe() );
real* a_ref_h;
CUDA_RT_CALL(hipHostMalloc(&a_ref_h, nx * ny * sizeof(real)));
real* a_h;
CUDA_RT_CALL(hipHostMalloc(&a_h, nx * ny * sizeof(real)));
double runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv && (0 == rank));
// ny - 2 rows are distributed amongst `size` ranks in such a way
// that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows.
// This optimizes load balancing when (ny - 2) % size != 0
int chunk_size;
int chunk_size_low = (ny - 2) / size;
int chunk_size_high = chunk_size_low + 1;
// To calculate the number of ranks that need to compute an extra row,
// the following formula is derived from this equation:
// num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = ny - 2
int num_ranks_low = size * chunk_size_low + size -
(ny - 2); // Number of ranks with chunk_size = chunk_size_low
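// Example: ny - 2 = 10 rows over size = 4 ranks gives chunk_size_low = 2 and
// num_ranks_low = 4*2 + 4 - 10 = 2, i.e. two ranks take 2 rows and two take 3 rows.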
if (rank < num_ranks_low)
chunk_size = chunk_size_low;
else
chunk_size = chunk_size_high;
//TODO: Allocate a and a_new from the NVSHMEM symmetric heap instead of using hipMalloc
// Note: size needs to be the same on all PEs but chunk_size might not be!
real* a = (real*) nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real));
real* a_new = (real*) nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real));
CUDA_RT_CALL(hipMemset(a, 0, nx * (chunk_size + 2) * sizeof(real)));
CUDA_RT_CALL(hipMemset(a_new, 0, nx * (chunk_size + 2) * sizeof(real)));
// Calculate local domain boundaries
int iy_start_global; // My start index in the global array
if (rank < num_ranks_low) {
iy_start_global = rank * chunk_size_low + 1;
} else {
iy_start_global =
num_ranks_low * chunk_size_low + (rank - num_ranks_low) * chunk_size_high + 1;
}
int iy_end_global = iy_start_global + chunk_size - 1; // My last index in the global array
int iy_start = 1;
int iy_end = iy_start + chunk_size;
const int top = rank > 0 ? rank - 1 : (size - 1);
const int bottom = (rank + 1) % size;
//TODO: calculate halo/boundary row index of top and bottom neighbors
const int iy_top_lower_boundary_idx = (top < num_ranks_low) ? (chunk_size_low + 1) : (chunk_size_high + 1);
const int iy_bottom_upper_boundary_idx = 0;
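// These are the halo-row indices on the neighboring PEs that the nvshmem puts below write into:
// the top PE's lower halo row sits just past its interior (its chunk_size + 1), the bottom PE's upper halo row is row 0.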
// Set Dirichlet boundary conditions on the left and right border
hipLaunchKernelGGL(( initialize_boundaries), dim3((chunk_size + 2) / 128 + 1), dim3(128), 0, 0, a, a_new, PI, iy_start_global - 1, nx, (chunk_size + 2), ny);
CUDA_RT_CALL(hipGetLastError());
CUDA_RT_CALL(hipDeviceSynchronize());
int leastPriority = 0;
int greatestPriority = leastPriority;
CUDA_RT_CALL(hipDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority));
hipStream_t compute_stream;
CUDA_RT_CALL(hipStreamCreateWithPriority(&compute_stream, hipStreamDefault, leastPriority));
hipStream_t push_stream;
CUDA_RT_CALL(
hipStreamCreateWithPriority(&push_stream, hipStreamDefault, greatestPriority));
hipEvent_t push_prep_done;
CUDA_RT_CALL(hipEventCreateWithFlags(&push_prep_done, hipEventDisableTiming));
hipEvent_t push_done;
CUDA_RT_CALL(hipEventCreateWithFlags(&push_done, hipEventDisableTiming));
hipEvent_t reset_l2norm_done;
CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2norm_done, hipEventDisableTiming));
// hipStream_t compute_stream;
// CUDA_RT_CALL(hipStreamCreate(&compute_stream));
// hipEvent_t compute_done;
// CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done, hipEventDisableTiming));
real* l2_norm_d;
CUDA_RT_CALL(hipMalloc(&l2_norm_d, sizeof(real)));
real* l2_norm_h;
CUDA_RT_CALL(hipHostMalloc(&l2_norm_h, sizeof(real)));
PUSH_RANGE("MPI_Warmup", 5)
for (int i = 0; i < 10; ++i) {
const int top = rank > 0 ? rank - 1 : (size - 1);
const int bottom = (rank + 1) % size;
MPI_CALL(MPI_Sendrecv(a_new + iy_start * nx, nx, MPI_REAL_TYPE, top, 0,
a_new + (iy_end * nx), nx, MPI_REAL_TYPE, bottom, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE));
MPI_CALL(MPI_Sendrecv(a_new + (iy_end - 1) * nx, nx, MPI_REAL_TYPE, bottom, 0, a_new, nx,
MPI_REAL_TYPE, top, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE));
std::swap(a_new, a);
}
POP_RANGE
CUDA_RT_CALL(hipDeviceSynchronize());
if (!csv && 0 == rank) {
printf(
"Jacobi relaxation: %d iterations on %d x %d mesh with norm check "
"every %d iterations\n",
iter_max, ny, nx, nccheck);
}
int iter = 0;
real l2_norm = 1.0;
bool calculate_norm; // boolean to store whether l2 norm will be calculated in
// an iteration or not
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
double start = MPI_Wtime();
PUSH_RANGE("Jacobi solve", 0)
while (l2_norm > tol && iter < iter_max) {
CUDA_RT_CALL(hipMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream));
CUDA_RT_CALL(hipEventRecord(reset_l2norm_done, compute_stream));
CUDA_RT_CALL(hipStreamWaitEvent(push_stream, reset_l2norm_done, 0));
calculate_norm = (iter % nccheck) == 0 || (!csv && (iter % 100) == 0);
launch_jacobi_kernel(a_new, a, l2_norm_d, (iy_start + 1), (iy_end - 1), nx, calculate_norm, compute_stream);
launch_jacobi_kernel(a_new, a, l2_norm_d, iy_start, (iy_start + 1), nx, calculate_norm, push_stream);
launch_jacobi_kernel(a_new, a, l2_norm_d, (iy_end - 1), iy_end, nx, calculate_norm, push_stream);
CUDA_RT_CALL(hipEventRecord(push_prep_done, push_stream));
if (calculate_norm) {
CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, push_prep_done, 0));
CUDA_RT_CALL(hipMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real), hipMemcpyDeviceToHost,
compute_stream));
}
//TODO: Replace MPI communication with Host initiated NVSHMEM calls
// Apply periodic boundary conditions
PUSH_RANGE("NVSHMEM", 5)
nvshmemx_float_put_on_stream(a_new + iy_top_lower_boundary_idx * nx, a_new + iy_start * nx, nx, top, push_stream);
nvshmemx_float_put_on_stream(a_new + iy_bottom_upper_boundary_idx * nx, a_new + (iy_end - 1) * nx, nx, bottom, push_stream);
CUDA_RT_CALL(hipEventRecord(push_done, push_stream));
POP_RANGE
CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, push_done, 0));
//TODO: add necessary inter PE synchronization using the nvshmemx_barrier_all_on_stream(...)
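// The stream-ordered barrier guarantees that every PE's halo puts have completed
// before the next Jacobi sweep reads its halo rows.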
nvshmemx_barrier_all_on_stream(compute_stream);
if (calculate_norm) {
CUDA_RT_CALL(hipStreamSynchronize(compute_stream));
MPI_CALL(MPI_Allreduce(l2_norm_h, &l2_norm, 1, MPI_REAL_TYPE, MPI_SUM, MPI_COMM_WORLD));
l2_norm = std::sqrt(l2_norm);
if (!csv && 0 == rank && (iter % 100) == 0) {
printf("%5d, %0.6f\n", iter, l2_norm);
}
}
std::swap(a_new, a);
iter++;
}
CUDA_RT_CALL(hipDeviceSynchronize());
double stop = MPI_Wtime();
POP_RANGE
CUDA_RT_CALL(hipMemcpy(a_h + iy_start_global * nx, a + nx,
::min((ny - iy_start_global) * nx, chunk_size * nx) * sizeof(real),
hipMemcpyDeviceToHost));
int result_correct = 1;
for (int iy = iy_start_global; result_correct && (iy < iy_end_global); ++iy) {
for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) {
if (::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) {
fprintf(stderr,
"ERROR on rank %d: a[%d * %d + %d] = %f does not match %f "
"(reference)\n",
rank, iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]);
result_correct = 0;
}
}
}
int global_result_correct = 1;
MPI_CALL(MPI_Allreduce(&result_correct, &global_result_correct, 1, MPI_INT, MPI_MIN,
MPI_COMM_WORLD));
result_correct = global_result_correct;
if (rank == 0 && result_correct) {
if (csv) {
//TODO: Replace MPI with NVSHMEM for your output
printf("nvshmem, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, size,
(stop - start), runtime_serial);
} else {
printf("Num GPUs: %d.\n", size);
printf(
"%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, "
"efficiency: %8.2f \n",
ny, nx, runtime_serial, size, (stop - start), runtime_serial / (stop - start),
runtime_serial / (size * (stop - start)) * 100);
}
}
CUDA_RT_CALL(hipEventDestroy(reset_l2norm_done));
CUDA_RT_CALL(hipEventDestroy(push_done));
CUDA_RT_CALL(hipEventDestroy(push_prep_done));
CUDA_RT_CALL(hipStreamDestroy(push_stream));
CUDA_RT_CALL(hipStreamDestroy(compute_stream));
CUDA_RT_CALL(hipHostFree(l2_norm_h));
CUDA_RT_CALL(hipFree(l2_norm_d));
//TODO: Deallocated a_new and a from the NVSHMEM symmetric heap instead of using hipFree()
nvshmem_free(a_new);
nvshmem_free(a);
CUDA_RT_CALL(hipHostFree(a_h));
CUDA_RT_CALL(hipHostFree(a_ref_h));
//TODO: Finalize NVSHMEM
nvshmem_finalize();
MPI_CALL(MPI_Finalize());
return (result_correct == 1) ? 0 : 1;
}
template <int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel_single_gpu(real* __restrict__ const a_new, const real* __restrict__ const a,
real* __restrict__ const l2_norm, const int iy_start,
const int iy_end, const int nx, const bool calculate_norm) {
#ifdef HAVE_CUB
typedef hipcub::BlockReduce<real, BLOCK_DIM_X, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
#endif // HAVE_CUB
int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
real local_l2_norm = 0.0;
if (iy < iy_end && ix < (nx - 1)) {
const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] +
a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]);
a_new[iy * nx + ix] = new_val;
if (calculate_norm) {
real residue = new_val - a[iy * nx + ix];
local_l2_norm += residue * residue;
}
}
if (calculate_norm) {
#ifdef HAVE_CUB
real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm);
#else
atomicAdd(l2_norm, local_l2_norm);
#endif // HAVE_CUB
}
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h,
const int nccheck, const bool print) {
real* a;
real* a_new;
hipStream_t compute_stream;
hipStream_t push_top_stream;
hipStream_t push_bottom_stream;
hipEvent_t compute_done;
hipEvent_t push_top_done;
hipEvent_t push_bottom_done;
real* l2_norm_d;
real* l2_norm_h;
int iy_start = 1;
int iy_end = (ny - 1);
CUDA_RT_CALL(hipMalloc(&a, nx * ny * sizeof(real)));
CUDA_RT_CALL(hipMalloc(&a_new, nx * ny * sizeof(real)));
CUDA_RT_CALL(hipMemset(a, 0, nx * ny * sizeof(real)));
CUDA_RT_CALL(hipMemset(a_new, 0, nx * ny * sizeof(real)));
// Set Dirichlet boundary conditions on the left and right border
hipLaunchKernelGGL(( initialize_boundaries), dim3(ny / 128 + 1), dim3(128), 0, 0, a, a_new, PI, 0, nx, ny, ny);
CUDA_RT_CALL(hipGetLastError());
CUDA_RT_CALL(hipDeviceSynchronize());
CUDA_RT_CALL(hipStreamCreate(&compute_stream));
CUDA_RT_CALL(hipStreamCreate(&push_top_stream));
CUDA_RT_CALL(hipStreamCreate(&push_bottom_stream));
CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done, hipEventDisableTiming));
CUDA_RT_CALL(hipEventCreateWithFlags(&push_top_done, hipEventDisableTiming));
CUDA_RT_CALL(hipEventCreateWithFlags(&push_bottom_done, hipEventDisableTiming));
CUDA_RT_CALL(hipMalloc(&l2_norm_d, sizeof(real)));
CUDA_RT_CALL(hipHostMalloc(&l2_norm_h, sizeof(real)));
CUDA_RT_CALL(hipDeviceSynchronize());
if (print)
printf(
"Single GPU jacobi relaxation: %d iterations on %d x %d mesh with "
"norm "
"check every %d iterations\n",
iter_max, ny, nx, nccheck);
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 32;
dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
((iy_end - iy_start) + dim_block_y - 1) / dim_block_y, 1);
int iter = 0;
real l2_norm = 1.0;
bool calculate_norm;
double start = MPI_Wtime();
PUSH_RANGE("Jacobi solve", 0)
while (l2_norm > tol && iter < iter_max) {
CUDA_RT_CALL(hipMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream));
CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, push_top_done, 0));
CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, push_bottom_done, 0));
calculate_norm = (iter % nccheck) == 0 || (iter % 100) == 0;
hipLaunchKernelGGL(( jacobi_kernel_single_gpu<dim_block_x, dim_block_y>), dim3(dim_grid), dim3(dim_block_x, dim_block_y, 1), 0, compute_stream,
a_new, a, l2_norm_d, iy_start, iy_end, nx, calculate_norm);
CUDA_RT_CALL(hipGetLastError());
CUDA_RT_CALL(hipEventRecord(compute_done, compute_stream));
if (calculate_norm) {
CUDA_RT_CALL(hipMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real), hipMemcpyDeviceToHost,
compute_stream));
}
// Apply periodic boundary conditions
CUDA_RT_CALL(hipStreamWaitEvent(push_top_stream, compute_done, 0));
CUDA_RT_CALL(hipMemcpyAsync(a_new, a_new + (iy_end - 1) * nx, nx * sizeof(real),
hipMemcpyDeviceToDevice, push_top_stream));
CUDA_RT_CALL(hipEventRecord(push_top_done, push_top_stream));
CUDA_RT_CALL(hipStreamWaitEvent(push_bottom_stream, compute_done, 0));
CUDA_RT_CALL(hipMemcpyAsync(a_new + iy_end * nx, a_new + iy_start * nx, nx * sizeof(real),
hipMemcpyDeviceToDevice, compute_stream));
CUDA_RT_CALL(hipEventRecord(push_bottom_done, push_bottom_stream));
if (calculate_norm) {
CUDA_RT_CALL(hipStreamSynchronize(compute_stream));
l2_norm = *l2_norm_h;
l2_norm = std::sqrt(l2_norm);
if (print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm);
}
std::swap(a_new, a);
iter++;
}
POP_RANGE
double stop = MPI_Wtime();
CUDA_RT_CALL(hipMemcpy(a_ref_h, a, nx * ny * sizeof(real), hipMemcpyDeviceToHost));
CUDA_RT_CALL(hipEventDestroy(push_bottom_done));
CUDA_RT_CALL(hipEventDestroy(push_top_done));
CUDA_RT_CALL(hipEventDestroy(compute_done));
CUDA_RT_CALL(hipStreamDestroy(push_bottom_stream));
CUDA_RT_CALL(hipStreamDestroy(push_top_stream));
CUDA_RT_CALL(hipStreamDestroy(compute_stream));
CUDA_RT_CALL(hipHostFree(l2_norm_h));
CUDA_RT_CALL(hipFree(l2_norm_d));
CUDA_RT_CALL(hipFree(a_new));
CUDA_RT_CALL(hipFree(a));
return (stop - start);
}
| 1fc4327e4bb6fef2fffa37f3e6e6866fff1e0a1f.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 2017,2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <sstream>
#include <mpi.h>
#define MPI_CALL(call) \
{ \
int mpi_status = call; \
if (0 != mpi_status) { \
char mpi_error_string[MPI_MAX_ERROR_STRING]; \
int mpi_error_string_length = 0; \
MPI_Error_string(mpi_status, mpi_error_string, &mpi_error_string_length); \
if (NULL != mpi_error_string) \
fprintf(stderr, \
"ERROR: MPI call \"%s\" in line %d of file %s failed " \
"with %s " \
"(%d).\n", \
#call, __LINE__, __FILE__, mpi_error_string, mpi_status); \
else \
fprintf(stderr, \
"ERROR: MPI call \"%s\" in line %d of file %s failed " \
"with %d.\n", \
#call, __LINE__, __FILE__, mpi_status); \
} \
}
#include <cuda_runtime.h>
//TODO: Include NVSHMEM headers
#include <nvshmem.h>
#include <nvshmemx.h>
#ifdef HAVE_CUB
#include <cub/block/block_reduce.cuh>
#endif // HAVE_CUB
#ifdef USE_NVTX
#include <nvToolsExt.h>
const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff,
0x0000ffff, 0x00ff0000, 0x00ffffff};
const int num_colors = sizeof(colors) / sizeof(uint32_t);
#define PUSH_RANGE(name, cid) \
{ \
int color_id = cid; \
color_id = color_id % num_colors; \
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name, cid)
#define POP_RANGE
#endif
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \
"with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \
}
#ifdef USE_DOUBLE
typedef double real;
#define MPI_REAL_TYPE MPI_DOUBLE
#else
typedef float real;
#define MPI_REAL_TYPE MPI_FLOAT
#endif
constexpr real tol = 1.0e-8;
const real PI = 2.0 * std::asin(1.0);
__global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a,
const real pi, const int offset, const int nx,
const int my_ny, const int ny) {
for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) {
const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1));
a[iy * nx + 0] = y0;
a[iy * nx + (nx - 1)] = y0;
a_new[iy * nx + 0] = y0;
a_new[iy * nx + (nx - 1)] = y0;
}
}
template <int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a,
real* __restrict__ const l2_norm, const int iy_start,
const int iy_end, const int nx, const bool calculate_norm) {
#ifdef HAVE_CUB
typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
#endif // HAVE_CUB
int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
real local_l2_norm = 0.0;
if (iy < iy_end && ix < (nx - 1)) {
const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] +
a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]);
a_new[iy * nx + ix] = new_val;
if (calculate_norm) {
real residue = new_val - a[iy * nx + ix];
local_l2_norm += residue * residue;
}
}
if (calculate_norm) {
#ifdef HAVE_CUB
real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm);
#else
atomicAdd(l2_norm, local_l2_norm);
#endif // HAVE_CUB
}
}
void launch_jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a,
real* __restrict__ const l2_norm, const int iy_start, const int iy_end,
const int nx, const bool calculate_norm, cudaStream_t stream) {
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 32;
dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
((iy_end - iy_start) + dim_block_y - 1) / dim_block_y, 1);
jacobi_kernel<dim_block_x, dim_block_y><<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, stream>>>(
a_new, a, l2_norm, iy_start, iy_end, nx, calculate_norm);
CUDA_RT_CALL(cudaGetLastError());
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h,
const int nccheck, const bool print);
template <typename T>
T get_argval(char** begin, char** end, const std::string& arg, const T default_val) {
T argval = default_val;
char** itr = std::find(begin, end, arg);
if (itr != end && ++itr != end) {
std::istringstream inbuf(*itr);
inbuf >> argval;
}
return argval;
}
bool get_arg(char** begin, char** end, const std::string& arg) {
char** itr = std::find(begin, end, arg);
if (itr != end) {
return true;
}
return false;
}
int main(int argc, char* argv[]) {
MPI_CALL(MPI_Init(&argc, &argv));
int rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
int size;
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size));
int num_devices = 0;
CUDA_RT_CALL(cudaGetDeviceCount(&num_devices));
const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000);
const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1);
const int nx = get_argval<int>(argv, argv + argc, "-nx", 16384);
const int ny = get_argval<int>(argv, argv + argc, "-ny", 16384);
const bool csv = get_arg(argv, argv + argc, "-csv");
int local_rank = -1;
{
MPI_Comm local_comm;
MPI_CALL(MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL,
&local_comm));
MPI_CALL(MPI_Comm_rank(local_comm, &local_rank));
MPI_CALL(MPI_Comm_free(&local_comm));
}
CUDA_RT_CALL(cudaSetDevice(local_rank%num_devices));
CUDA_RT_CALL(cudaFree(0));
//TODO: Initialize NVSHMEM using nvshmemx_init_attr
MPI_Comm mpi_comm = MPI_COMM_WORLD;
nvshmemx_init_attr_t attr;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
assert( size == nvshmem_n_pes() );
assert( rank == nvshmem_my_pe() );
real* a_ref_h;
CUDA_RT_CALL(cudaMallocHost(&a_ref_h, nx * ny * sizeof(real)));
real* a_h;
CUDA_RT_CALL(cudaMallocHost(&a_h, nx * ny * sizeof(real)));
double runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv && (0 == rank));
// ny - 2 rows are distributed amongst `size` ranks in such a way
// that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows.
// This optimizes load balancing when (ny - 2) % size != 0
int chunk_size;
int chunk_size_low = (ny - 2) / size;
int chunk_size_high = chunk_size_low + 1;
// To calculate the number of ranks that need to compute an extra row,
// the following formula is derived from this equation:
// num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = ny - 2
int num_ranks_low = size * chunk_size_low + size -
(ny - 2); // Number of ranks with chunk_size = chunk_size_low
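// Example: ny - 2 = 10 rows over size = 4 ranks gives chunk_size_low = 2 and
// num_ranks_low = 4*2 + 4 - 10 = 2, i.e. two ranks take 2 rows and two take 3 rows.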
if (rank < num_ranks_low)
chunk_size = chunk_size_low;
else
chunk_size = chunk_size_high;
//TODO: Allocate a and a_new from the NVSHMEM symmetric heap instead of using cudaMalloc
// Note: size needs to be the same on all PEs but chunk_size might not be!
real* a = (real*) nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real));
real* a_new = (real*) nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real));
CUDA_RT_CALL(cudaMemset(a, 0, nx * (chunk_size + 2) * sizeof(real)));
CUDA_RT_CALL(cudaMemset(a_new, 0, nx * (chunk_size + 2) * sizeof(real)));
// Calculate local domain boundaries
int iy_start_global; // My start index in the global array
if (rank < num_ranks_low) {
iy_start_global = rank * chunk_size_low + 1;
} else {
iy_start_global =
num_ranks_low * chunk_size_low + (rank - num_ranks_low) * chunk_size_high + 1;
}
int iy_end_global = iy_start_global + chunk_size - 1; // My last index in the global array
int iy_start = 1;
int iy_end = iy_start + chunk_size;
const int top = rank > 0 ? rank - 1 : (size - 1);
const int bottom = (rank + 1) % size;
//TODO: calculate halo/boundary row index of top and bottom neighbors
const int iy_top_lower_boundary_idx = (top < num_ranks_low) ? (chunk_size_low + 1) : (chunk_size_high + 1);
const int iy_bottom_upper_boundary_idx = 0;
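    // In the neighbor's local numbering, the top rank keeps its bottom halo in row chunk_size + 1
    // (chunk_size_low + 1 or chunk_size_high + 1 depending on its chunk) and the bottom rank keeps
    // its top halo in row 0; these are the destination rows for the NVSHMEM puts in the solver loop.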
    // Set Dirichlet boundary conditions on the left and right border
initialize_boundaries<<<(chunk_size + 2) / 128 + 1, 128>>>(a, a_new, PI, iy_start_global - 1, nx, (chunk_size + 2), ny);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaDeviceSynchronize());
int leastPriority = 0;
int greatestPriority = leastPriority;
CUDA_RT_CALL(cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority));
cudaStream_t compute_stream;
CUDA_RT_CALL(cudaStreamCreateWithPriority(&compute_stream, cudaStreamDefault, leastPriority));
cudaStream_t push_stream;
CUDA_RT_CALL(
cudaStreamCreateWithPriority(&push_stream, cudaStreamDefault, greatestPriority));
cudaEvent_t push_prep_done;
CUDA_RT_CALL(cudaEventCreateWithFlags(&push_prep_done, cudaEventDisableTiming));
cudaEvent_t push_done;
CUDA_RT_CALL(cudaEventCreateWithFlags(&push_done, cudaEventDisableTiming));
cudaEvent_t reset_l2norm_done;
CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2norm_done, cudaEventDisableTiming));
// cudaStream_t compute_stream;
// CUDA_RT_CALL(cudaStreamCreate(&compute_stream));
// cudaEvent_t compute_done;
// CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done, cudaEventDisableTiming));
real* l2_norm_d;
CUDA_RT_CALL(cudaMalloc(&l2_norm_d, sizeof(real)));
real* l2_norm_h;
CUDA_RT_CALL(cudaMallocHost(&l2_norm_h, sizeof(real)));
PUSH_RANGE("MPI_Warmup", 5)
for (int i = 0; i < 10; ++i) {
const int top = rank > 0 ? rank - 1 : (size - 1);
const int bottom = (rank + 1) % size;
MPI_CALL(MPI_Sendrecv(a_new + iy_start * nx, nx, MPI_REAL_TYPE, top, 0,
a_new + (iy_end * nx), nx, MPI_REAL_TYPE, bottom, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE));
MPI_CALL(MPI_Sendrecv(a_new + (iy_end - 1) * nx, nx, MPI_REAL_TYPE, bottom, 0, a_new, nx,
MPI_REAL_TYPE, top, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE));
std::swap(a_new, a);
}
POP_RANGE
CUDA_RT_CALL(cudaDeviceSynchronize());
if (!csv && 0 == rank) {
printf(
"Jacobi relaxation: %d iterations on %d x %d mesh with norm check "
"every %d iterations\n",
iter_max, ny, nx, nccheck);
}
int iter = 0;
real l2_norm = 1.0;
bool calculate_norm; // boolean to store whether l2 norm will be calculated in
// an iteration or not
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
double start = MPI_Wtime();
PUSH_RANGE("Jacobi solve", 0)
while (l2_norm > tol && iter < iter_max) {
CUDA_RT_CALL(cudaMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream));
CUDA_RT_CALL(cudaEventRecord(reset_l2norm_done, compute_stream));
CUDA_RT_CALL(cudaStreamWaitEvent(push_stream, reset_l2norm_done, 0));
calculate_norm = (iter % nccheck) == 0 || (!csv && (iter % 100) == 0);
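        // Interior rows run on compute_stream; the two rows bordering the halos run on the
        // higher-priority push_stream so their results can be sent to the neighbors while the
        // interior update is still in flight.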
launch_jacobi_kernel(a_new, a, l2_norm_d, (iy_start + 1), (iy_end - 1), nx, calculate_norm, compute_stream);
launch_jacobi_kernel(a_new, a, l2_norm_d, iy_start, (iy_start + 1), nx, calculate_norm, push_stream);
launch_jacobi_kernel(a_new, a, l2_norm_d, (iy_end - 1), iy_end, nx, calculate_norm, push_stream);
CUDA_RT_CALL(cudaEventRecord(push_prep_done, push_stream));
if (calculate_norm) {
CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, push_prep_done, 0));
CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real), cudaMemcpyDeviceToHost,
compute_stream));
}
//TODO: Replace MPI communication with Host initiated NVSHMEM calls
// Apply periodic boundary conditions
PUSH_RANGE("NVSHMEM", 5)
nvshmemx_float_put_on_stream(a_new + iy_top_lower_boundary_idx * nx, a_new + iy_start * nx, nx, top, push_stream);
nvshmemx_float_put_on_stream(a_new + iy_bottom_upper_boundary_idx * nx, a_new + (iy_end - 1) * nx, nx, bottom, push_stream);
CUDA_RT_CALL(cudaEventRecord(push_done, push_stream));
POP_RANGE
CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, push_done, 0));
//TODO: add necessary inter PE synchronization using the nvshmemx_barrier_all_on_stream(...)
nvshmemx_barrier_all_on_stream(compute_stream);
if (calculate_norm) {
CUDA_RT_CALL(cudaStreamSynchronize(compute_stream));
MPI_CALL(MPI_Allreduce(l2_norm_h, &l2_norm, 1, MPI_REAL_TYPE, MPI_SUM, MPI_COMM_WORLD));
l2_norm = std::sqrt(l2_norm);
if (!csv && 0 == rank && (iter % 100) == 0) {
printf("%5d, %0.6f\n", iter, l2_norm);
}
}
std::swap(a_new, a);
iter++;
}
CUDA_RT_CALL(cudaDeviceSynchronize());
double stop = MPI_Wtime();
POP_RANGE
CUDA_RT_CALL(cudaMemcpy(a_h + iy_start_global * nx, a + nx,
std::min((ny - iy_start_global) * nx, chunk_size * nx) * sizeof(real),
cudaMemcpyDeviceToHost));
int result_correct = 1;
for (int iy = iy_start_global; result_correct && (iy < iy_end_global); ++iy) {
for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) {
if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) {
fprintf(stderr,
"ERROR on rank %d: a[%d * %d + %d] = %f does not match %f "
"(reference)\n",
rank, iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]);
result_correct = 0;
}
}
}
int global_result_correct = 1;
MPI_CALL(MPI_Allreduce(&result_correct, &global_result_correct, 1, MPI_INT, MPI_MIN,
MPI_COMM_WORLD));
result_correct = global_result_correct;
if (rank == 0 && result_correct) {
if (csv) {
//TODO: Replace MPI with NVSHMEM for your output
printf("nvshmem, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, size,
(stop - start), runtime_serial);
} else {
printf("Num GPUs: %d.\n", size);
printf(
"%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, "
"efficiency: %8.2f \n",
ny, nx, runtime_serial, size, (stop - start), runtime_serial / (stop - start),
runtime_serial / (size * (stop - start)) * 100);
}
}
CUDA_RT_CALL(cudaEventDestroy(reset_l2norm_done));
CUDA_RT_CALL(cudaEventDestroy(push_done));
CUDA_RT_CALL(cudaEventDestroy(push_prep_done));
CUDA_RT_CALL(cudaStreamDestroy(push_stream));
CUDA_RT_CALL(cudaStreamDestroy(compute_stream));
CUDA_RT_CALL(cudaFreeHost(l2_norm_h));
CUDA_RT_CALL(cudaFree(l2_norm_d));
    //TODO: Deallocate a_new and a from the NVSHMEM symmetric heap instead of using cudaFree()
nvshmem_free(a_new);
nvshmem_free(a);
CUDA_RT_CALL(cudaFreeHost(a_h));
CUDA_RT_CALL(cudaFreeHost(a_ref_h));
//TODO: Finalize NVSHMEM
nvshmem_finalize();
MPI_CALL(MPI_Finalize());
return (result_correct == 1) ? 0 : 1;
}
template <int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel_single_gpu(real* __restrict__ const a_new, const real* __restrict__ const a,
real* __restrict__ const l2_norm, const int iy_start,
const int iy_end, const int nx, const bool calculate_norm) {
#ifdef HAVE_CUB
typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
#endif // HAVE_CUB
int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
real local_l2_norm = 0.0;
if (iy < iy_end && ix < (nx - 1)) {
const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] +
a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]);
a_new[iy * nx + ix] = new_val;
if (calculate_norm) {
real residue = new_val - a[iy * nx + ix];
local_l2_norm += residue * residue;
}
}
if (calculate_norm) {
#ifdef HAVE_CUB
real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm);
#else
atomicAdd(l2_norm, local_l2_norm);
#endif // HAVE_CUB
}
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h,
const int nccheck, const bool print) {
real* a;
real* a_new;
cudaStream_t compute_stream;
cudaStream_t push_top_stream;
cudaStream_t push_bottom_stream;
cudaEvent_t compute_done;
cudaEvent_t push_top_done;
cudaEvent_t push_bottom_done;
real* l2_norm_d;
real* l2_norm_h;
int iy_start = 1;
int iy_end = (ny - 1);
CUDA_RT_CALL(cudaMalloc(&a, nx * ny * sizeof(real)));
CUDA_RT_CALL(cudaMalloc(&a_new, nx * ny * sizeof(real)));
CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(real)));
CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(real)));
    // Set Dirichlet boundary conditions on the left and right border
initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, 0, nx, ny, ny);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaDeviceSynchronize());
CUDA_RT_CALL(cudaStreamCreate(&compute_stream));
CUDA_RT_CALL(cudaStreamCreate(&push_top_stream));
CUDA_RT_CALL(cudaStreamCreate(&push_bottom_stream));
CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done, cudaEventDisableTiming));
CUDA_RT_CALL(cudaEventCreateWithFlags(&push_top_done, cudaEventDisableTiming));
CUDA_RT_CALL(cudaEventCreateWithFlags(&push_bottom_done, cudaEventDisableTiming));
CUDA_RT_CALL(cudaMalloc(&l2_norm_d, sizeof(real)));
CUDA_RT_CALL(cudaMallocHost(&l2_norm_h, sizeof(real)));
CUDA_RT_CALL(cudaDeviceSynchronize());
if (print)
printf(
"Single GPU jacobi relaxation: %d iterations on %d x %d mesh with "
"norm "
"check every %d iterations\n",
iter_max, ny, nx, nccheck);
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 32;
dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
((iy_end - iy_start) + dim_block_y - 1) / dim_block_y, 1);
int iter = 0;
real l2_norm = 1.0;
bool calculate_norm;
double start = MPI_Wtime();
PUSH_RANGE("Jacobi solve", 0)
while (l2_norm > tol && iter < iter_max) {
CUDA_RT_CALL(cudaMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream));
CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, push_top_done, 0));
CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, push_bottom_done, 0));
calculate_norm = (iter % nccheck) == 0 || (iter % 100) == 0;
jacobi_kernel_single_gpu<dim_block_x, dim_block_y><<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>(
a_new, a, l2_norm_d, iy_start, iy_end, nx, calculate_norm);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaEventRecord(compute_done, compute_stream));
if (calculate_norm) {
CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real), cudaMemcpyDeviceToHost,
compute_stream));
}
// Apply periodic boundary conditions
CUDA_RT_CALL(cudaStreamWaitEvent(push_top_stream, compute_done, 0));
CUDA_RT_CALL(cudaMemcpyAsync(a_new, a_new + (iy_end - 1) * nx, nx * sizeof(real),
cudaMemcpyDeviceToDevice, push_top_stream));
CUDA_RT_CALL(cudaEventRecord(push_top_done, push_top_stream));
CUDA_RT_CALL(cudaStreamWaitEvent(push_bottom_stream, compute_done, 0));
CUDA_RT_CALL(cudaMemcpyAsync(a_new + iy_end * nx, a_new + iy_start * nx, nx * sizeof(real),
                                     cudaMemcpyDeviceToDevice, push_bottom_stream));
CUDA_RT_CALL(cudaEventRecord(push_bottom_done, push_bottom_stream));
if (calculate_norm) {
CUDA_RT_CALL(cudaStreamSynchronize(compute_stream));
l2_norm = *l2_norm_h;
l2_norm = std::sqrt(l2_norm);
if (print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm);
}
std::swap(a_new, a);
iter++;
}
POP_RANGE
double stop = MPI_Wtime();
CUDA_RT_CALL(cudaMemcpy(a_ref_h, a, nx * ny * sizeof(real), cudaMemcpyDeviceToHost));
CUDA_RT_CALL(cudaEventDestroy(push_bottom_done));
CUDA_RT_CALL(cudaEventDestroy(push_top_done));
CUDA_RT_CALL(cudaEventDestroy(compute_done));
CUDA_RT_CALL(cudaStreamDestroy(push_bottom_stream));
CUDA_RT_CALL(cudaStreamDestroy(push_top_stream));
CUDA_RT_CALL(cudaStreamDestroy(compute_stream));
CUDA_RT_CALL(cudaFreeHost(l2_norm_h));
CUDA_RT_CALL(cudaFree(l2_norm_d));
CUDA_RT_CALL(cudaFree(a_new));
CUDA_RT_CALL(cudaFree(a));
return (stop - start);
}
|
b4b57a1b823f5c53a5bf27ba689204a10bf6f6d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file gemm.cu
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief GEMM Kernel
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 10)
#endif
#ifndef TILE_W
#define TILE_W 128
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
#define SM 64
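// reorder() packs one SM x SM tile of B into a contiguous scratch buffer so that mm() can stream
// through it with unit stride; gemm_host() then performs the blocked multiplication tile by tile.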
static void reorder(float *__restrict__ a, float *__restrict__ b, int n)
{
for (int i = 0; i < SM; i++)
for (int j = 0; j < SM; j++)
b[i * SM + j] = a[i * n + j];
}
static void mm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
for (int i = 0; i < SM; i++)
{
for (int k = 0; k < SM; k++)
{
for (int j = 0; j < SM; j++)
{
c[i * n + j] += a[i * n + k] * b[k * SM + j];
}
}
}
}
void gemm_host(float *a, float *b, float *c, int n)
{
int bk = n / SM;
#pragma omp parallel for collapse(3)
for (int i = 0; i < bk; i++)
{
for (int j = 0; j < bk; j++)
{
for (int k = 0; k < bk; k++)
{
float b2[SM * SM];
reorder(&b[SM * (k * n + j)], b2, n);
mm(&a[SM * (i * n + k)], b2, &c[SM * (i * n + j)], n);
}
}
}
}
__global__ void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int ib = blockIdx.y;
int jb = blockIdx.x;
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
float Cvalue = 0.0f;
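    // Walk the K dimension one BLOCK_SIZE-wide tile at a time: each thread stages one element of A
    // and one of B into shared memory, the block synchronizes, and each thread accumulates the
    // partial dot product for its output element from the shared tiles.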
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
a_offset = ib * n * BLOCK_SIZE + kb * BLOCK_SIZE;
b_offset = kb * n * BLOCK_SIZE + jb * BLOCK_SIZE;
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
Cvalue += As[it][k] * Bs[k][jt];
__syncthreads();
}
c_offset = ib * n * BLOCK_SIZE + jb * BLOCK_SIZE;
c[c_offset + it * n + jt] = Cvalue;
}
int main(int argc, char *argv[])
{
int n = N, iret = 0;
float *a, *b, *c, *g;
struct timespec rt[2];
double wt; // walltime
if (argc > 1)
n = atoi(argv[1]);
//TODO Update malloc to hipHostMalloc or hipMallocManaged (if necessary)
if (NULL == (a = (float *)malloc(sizeof(*a) * n * n)))
{
printf("error: memory allocation for 'x'\n");
iret = -1;
}
//TODO Update malloc to hipHostMalloc or hipMallocManaged (if necessary)
if (NULL == (b = (float *)malloc(sizeof(*b) * n * n)))
{
printf("error: memory allocation for 'y'\n");
iret = -1;
}
//TODO Update malloc to hipHostMalloc or hipMallocManaged (if necessary)
if (NULL == (c = (float *)malloc(sizeof(*c) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
//TODO Update hipHostFree or hipFree (if necessary)
free(a);
//TODO Update hipHostFree or hipFree (if necessary)
free(b);
//TODO Update hipHostFree or hipFree (if necessary)
free(c);
free(g);
exit(EXIT_FAILURE);
}
//Init Data
int _b = rand() % TWO04;
int _c = rand() % TWO08;
#pragma omp parallel for
for (int i = 0; i < n * n; i++)
{
a[i] = _b / (float)TWO02;
b[i] = _c / (float)TWO04;
c[i] = g[i] = 0.0;
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gemm_host(a, b, g, n);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
    //TODO Remove if unnecessary
float *d_a, *d_b, *d_c;
gpuErrchk(hipMalloc((void **)&d_a, sizeof(float) * n * n));
gpuErrchk(hipMalloc((void **)&d_b, sizeof(float) * n * n));
gpuErrchk(hipMalloc((void **)&d_c, sizeof(float) * n * n));
clock_gettime(CLOCK_REALTIME, rt + 0);
    //TODO Remove if unnecessary
gpuErrchk(hipMemcpy(d_a, a, sizeof(float) * n * n, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_b, b, sizeof(float) * n * n, hipMemcpyHostToDevice));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
hipLaunchKernelGGL(( gemm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, n);
gpuErrchk(hipPeekAtLastError());
    //TODO Remove if unnecessary
gpuErrchk(hipMemcpy(c, d_c, sizeof(float) * n * n, hipMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
//TODO Update hipHostFree or hipFree (if necessary)
free(a);
//TODO Update hipHostFree or hipFree (if necessary)
free(b);
//TODO Update hipHostFree or hipFree (if necessary)
free(c);
free(g);
    //TODO Remove if unnecessary
    gpuErrchk(hipFree(d_a));
    //TODO Remove if unnecessary
    gpuErrchk(hipFree(d_b));
    //TODO Remove if unnecessary
gpuErrchk(hipFree(d_c));
return 0;
}
| b4b57a1b823f5c53a5bf27ba689204a10bf6f6d4.cu | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file gemm.cu
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief GEMM Kernel
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 10)
#endif
#ifndef TILE_W
#define TILE_W 128
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
#define SM 64
static void reorder(float *__restrict__ a, float *__restrict__ b, int n)
{
for (int i = 0; i < SM; i++)
for (int j = 0; j < SM; j++)
b[i * SM + j] = a[i * n + j];
}
static void mm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
for (int i = 0; i < SM; i++)
{
for (int k = 0; k < SM; k++)
{
for (int j = 0; j < SM; j++)
{
c[i * n + j] += a[i * n + k] * b[k * SM + j];
}
}
}
}
void gemm_host(float *a, float *b, float *c, int n)
{
int bk = n / SM;
#pragma omp parallel for collapse(3)
for (int i = 0; i < bk; i++)
{
for (int j = 0; j < bk; j++)
{
for (int k = 0; k < bk; k++)
{
float b2[SM * SM];
reorder(&b[SM * (k * n + j)], b2, n);
mm(&a[SM * (i * n + k)], b2, &c[SM * (i * n + j)], n);
}
}
}
}
__global__ void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int ib = blockIdx.y;
int jb = blockIdx.x;
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
float Cvalue = 0.0f;
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
a_offset = ib * n * BLOCK_SIZE + kb * BLOCK_SIZE;
b_offset = kb * n * BLOCK_SIZE + jb * BLOCK_SIZE;
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
Cvalue += As[it][k] * Bs[k][jt];
__syncthreads();
}
c_offset = ib * n * BLOCK_SIZE + jb * BLOCK_SIZE;
c[c_offset + it * n + jt] = Cvalue;
}
int main(int argc, char *argv[])
{
int n = N, iret = 0;
float *a, *b, *c, *g;
struct timespec rt[2];
double wt; // walltime
if (argc > 1)
n = atoi(argv[1]);
//TODO Update malloc to cudaMallocHost or cudaMallocManaged (if necessary)
if (NULL == (a = (float *)malloc(sizeof(*a) * n * n)))
{
printf("error: memory allocation for 'x'\n");
iret = -1;
}
//TODO Update malloc to cudaMallocHost or cudaMallocManaged (if necessary)
if (NULL == (b = (float *)malloc(sizeof(*b) * n * n)))
{
printf("error: memory allocation for 'y'\n");
iret = -1;
}
//TODO Update malloc to cudaMallocHost or cudaMallocManaged (if necessary)
if (NULL == (c = (float *)malloc(sizeof(*c) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
//TODO Update cudaFreeHost or cudaFree (if necessary)
free(a);
//TODO Update cudaFreeHost or cudaFree (if necessary)
free(b);
//TODO Update cudaFreeHost or cudaFree (if necessary)
free(c);
free(g);
exit(EXIT_FAILURE);
}
//Init Data
int _b = rand() % TWO04;
int _c = rand() % TWO08;
#pragma omp parallel for
for (int i = 0; i < n * n; i++)
{
a[i] = _b / (float)TWO02;
b[i] = _c / (float)TWO04;
c[i] = g[i] = 0.0;
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gemm_host(a, b, g, n);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
    //TODO Remove if unnecessary
float *d_a, *d_b, *d_c;
gpuErrchk(cudaMalloc((void **)&d_a, sizeof(float) * n * n));
gpuErrchk(cudaMalloc((void **)&d_b, sizeof(float) * n * n));
gpuErrchk(cudaMalloc((void **)&d_c, sizeof(float) * n * n));
clock_gettime(CLOCK_REALTIME, rt + 0);
    //TODO Remove if unnecessary
gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
gemm<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
gpuErrchk(cudaPeekAtLastError());
    //TODO Remove if unnecessary
gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
//TODO Update cudaFreeHost or cudaFree (if necessary)
free(a);
//TODO Update cudaFreeHost or cudaFree (if necessary)
free(b);
//TODO Update cudaFreeHost or cudaFree (if necessary)
free(c);
free(g);
    //TODO Remove if unnecessary
    gpuErrchk(cudaFree(d_a));
    //TODO Remove if unnecessary
    gpuErrchk(cudaFree(d_b));
    //TODO Remove if unnecessary
gpuErrchk(cudaFree(d_c));
return 0;
}
|
47e94c80c2d18dbf25ce0672cef4a12304a4e002.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* function for projecting lidar points
*
*/
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "../common.h"
__global__ void LinearInterpolateKernel(const float* const imageIn,
float* const out,
const size_t height,
const size_t width,
const float* const x,
const float* const y,
const size_t numPoints){
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= numPoints){
return;
}
if((x[i] < 0) || (y[i] < 0) || (x[i] >= (width-1)) || (y[i] >= (height-1))){
out[i] = 0;
return;
}
int xF = (int)x[i];
int yF = (int)y[i];
float xD = x[i] - (float)xF;
float yD = y[i] - (float)yF;
//linear interpolate
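    // Bilinear weighting of the four surrounding pixels by the fractional offsets xD and yD;
    // the yF + xF*height indexing reflects the column-major (MATLAB-style) image layout.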
out[i] = (1-yD)*(1-xD)*imageIn[yF + xF*height] +
(1-yD)*xD*imageIn[yF + (xF+1)*height] +
yD*(1-xD)*imageIn[yF+1 + xF*height] +
yD*xD*imageIn[yF+1 + (xF+1)*height];
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
//initialize the MathWorks GPU API.
mxInitGPU();
//read data
mxGPUArray const * image = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const * points = mxGPUCreateFromMxArray(prhs[1]);
size_t imageWidth = mxGPUGetDimensions(image)[1];
size_t imageHeight = mxGPUGetDimensions(image)[0];
size_t numPoints = mxGPUGetDimensions(points)[0];
size_t imageDepth = 1;
if(mxGPUGetNumberOfDimensions(image) > 2){
imageDepth = mxGPUGetDimensions(image)[2];
}
//create pointers from data
float* imagePtr = (float*)(mxGPUGetDataReadOnly(image));
float* xPtr = (float*)(mxGPUGetDataReadOnly(points));
float* yPtr = &(xPtr[numPoints]);
//create output
mwSize outSize[] = {numPoints,imageDepth};
mxGPUArray* out = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
float* outPtr = (float*)(mxGPUGetDataReadOnly(out));
    //run and get outputs
for(size_t i = 0; i < imageDepth; i++){
float* imageLayerPtr = &(imagePtr[imageHeight*imageWidth*i]);
float* outLayerPtr = &(outPtr[numPoints*i]);
hipLaunchKernelGGL(( LinearInterpolateKernel), dim3(gridSize(numPoints)), dim3(BLOCK_SIZE), 0, 0, imageLayerPtr, outLayerPtr, imageHeight, imageWidth, xPtr, yPtr, numPoints);
CudaCheckError();
}
plhs[0] = mxGPUCreateMxArrayOnGPU(out);
//destroy reference structures
mxGPUDestroyGPUArray(points);
mxGPUDestroyGPUArray(image);
mxGPUDestroyGPUArray(out);
}
| 47e94c80c2d18dbf25ce0672cef4a12304a4e002.cu | /* function for projecting lidar points
*
*/
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "../common.h"
__global__ void LinearInterpolateKernel(const float* const imageIn,
float* const out,
const size_t height,
const size_t width,
const float* const x,
const float* const y,
const size_t numPoints){
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= numPoints){
return;
}
if((x[i] < 0) || (y[i] < 0) || (x[i] >= (width-1)) || (y[i] >= (height-1))){
out[i] = 0;
return;
}
int xF = (int)x[i];
int yF = (int)y[i];
float xD = x[i] - (float)xF;
float yD = y[i] - (float)yF;
//linear interpolate
out[i] = (1-yD)*(1-xD)*imageIn[yF + xF*height] +
(1-yD)*xD*imageIn[yF + (xF+1)*height] +
yD*(1-xD)*imageIn[yF+1 + xF*height] +
yD*xD*imageIn[yF+1 + (xF+1)*height];
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
//initialize the MathWorks GPU API.
mxInitGPU();
//read data
mxGPUArray const * image = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const * points = mxGPUCreateFromMxArray(prhs[1]);
size_t imageWidth = mxGPUGetDimensions(image)[1];
size_t imageHeight = mxGPUGetDimensions(image)[0];
size_t numPoints = mxGPUGetDimensions(points)[0];
size_t imageDepth = 1;
if(mxGPUGetNumberOfDimensions(image) > 2){
imageDepth = mxGPUGetDimensions(image)[2];
}
//create pointers from data
float* imagePtr = (float*)(mxGPUGetDataReadOnly(image));
float* xPtr = (float*)(mxGPUGetDataReadOnly(points));
float* yPtr = &(xPtr[numPoints]);
//create output
mwSize outSize[] = {numPoints,imageDepth};
mxGPUArray* out = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
float* outPtr = (float*)(mxGPUGetDataReadOnly(out));
    //run and get outputs
for(size_t i = 0; i < imageDepth; i++){
float* imageLayerPtr = &(imagePtr[imageHeight*imageWidth*i]);
float* outLayerPtr = &(outPtr[numPoints*i]);
LinearInterpolateKernel<<<gridSize(numPoints), BLOCK_SIZE>>>(imageLayerPtr, outLayerPtr, imageHeight, imageWidth, xPtr, yPtr, numPoints);
CudaCheckError();
}
plhs[0] = mxGPUCreateMxArrayOnGPU(out);
//destroy reference structures
mxGPUDestroyGPUArray(points);
mxGPUDestroyGPUArray(image);
mxGPUDestroyGPUArray(out);
}
|
0f7dbb861e42b19251b8ee22e008400cee37f031.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EXAMPLE OF SQUARE MATRIX MULTIPLICATION CHAPTER 4
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define CHECK_ERROR(call) { \
hipError_t err = call; \
if (err != hipSuccess) { \
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define TILE_WIDTH 16
#define DIM 1024
__global__
void matrixMulKernel(float *P, float *M, float *N) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH*2];
int tx = threadIdx.x, bx = blockIdx.x;
int ty = threadIdx.y, by = blockIdx.y;
// identify row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
if ( Row < DIM && Col < DIM ) {
float pValue = 0;
float pValue2 = 0;
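		// Thread coarsening: each thread produces two outputs, P[Row*DIM+Col] and
		// P[Row*DIM+Col + DIM/2], which is why Nds holds two N tiles side by side and the grid is
		// launched with only half the blocks along x.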
// Loop over the d_M and d_N tiles required to compute the d_P element
for (int ph = 0; ph < DIM/TILE_WIDTH; ph++) {
			// Collaborative loading of d_M and d_N tiles into the shared memory
Mds[ty][tx] = M[Row * DIM + ph * TILE_WIDTH + tx];
Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * DIM + Col];
Nds[ty][tx+TILE_WIDTH] = N[(ph * TILE_WIDTH + ty) * DIM + Col + (DIM/2)];
// printf("ph = %d; block[%d,%d]; thread[%d,%d] --> Nds[0][%d] = %2.2f\n", ph, blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, tx, Nds[0][tx]);
__syncthreads();
for(int k = 0; k < TILE_WIDTH; k++){
//printf("ph = %d; block[%d,%d]; thread[%d,%d] --> %2.2f + %2.2f * %2.2f\n", ph, blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, pValue, Mds[ty][k], Nds[k][tx]);
pValue += Mds[ty][k] * Nds[k][tx];
pValue2 += Mds[ty][k] * Nds[k][tx+TILE_WIDTH];
}
__syncthreads();
}
P[Row*DIM+Col] = pValue;
P[Row*DIM+Col + (DIM/2)] = pValue2;
}
}
float matrixMul(float *h_P, float *h_M, float *h_N) {
	int size = (DIM*DIM)*sizeof(float); // assume square matrices
float *d_M, *d_N, *d_P;
//1. Allocate global memory on the device for d_M, d_N and d_P
	// With this type of allocation it isn't possible to access elements using higher-dimensional
	// indexing syntax; the index needs to be linearized first.
CHECK_ERROR(hipMalloc((void**)&d_M, size));
CHECK_ERROR(hipMalloc((void**)&d_N, size));
CHECK_ERROR(hipMalloc((void**)&d_P, size));
// copy h_M and h_N to device memory
hipMemcpy(d_M, h_M, size, hipMemcpyHostToDevice);
hipMemcpy(d_N, h_N, size, hipMemcpyHostToDevice);
hipEvent_t startTimeCuda, stopTimeCuda;
hipEventCreate(&startTimeCuda);
hipEventCreate(&stopTimeCuda);
//2. Kernel launch code - with TILE_WIDTH^2 threads per block
hipEventRecord(startTimeCuda, 0);
dim3 dimGrid(ceil((DIM/TILE_WIDTH)/2.0), ceil(DIM/TILE_WIDTH), 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
hipLaunchKernelGGL(( matrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P, d_M, d_N);
hipEventRecord(stopTimeCuda,0);
//3. copy d_P from the device memory
hipEventSynchronize(stopTimeCuda);
float msTime;
hipEventElapsedTime(&msTime, startTimeCuda, stopTimeCuda);
printf("KernelTime: %f\n", msTime);
hipMemcpy(h_P, d_P, size, hipMemcpyDeviceToHost);
// Free device matricies
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
return msTime;
}
void sequentialMM(float* h_M, float* h_N, float* h_C) {
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
float sum = 0.0;
for (int k = 0; k < DIM; ++k)
sum += h_M[i * DIM + k] * h_N[k * DIM + j];
h_C[i * DIM + j] = sum;
}
}
}
int main(int argc, char *argv[]) {
float *h_M, *h_N, *h_P, *h_C;
float msTime, msTime_seq;
hipEvent_t startTimeCuda, stopTimeCuda;
hipEventCreate(&startTimeCuda);
hipEventCreate(&stopTimeCuda);
h_M = (float*)malloc(sizeof(float)*DIM*DIM);
h_N = (float*)malloc(sizeof(float)*DIM*DIM);
h_P = (float*)malloc(sizeof(float)*DIM*DIM);
h_C = (float*)malloc(sizeof(float)*DIM*DIM);
// fill M and N with float numbers
srand(time(NULL));
for (int i = 0; i < DIM ; i++) {
for (int j = 0; j < DIM ; j++) {
h_M[i*DIM+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
h_N[i*DIM+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
			h_C[i*DIM+j] = 0.0;
}
}
// perform matrix multiplication
msTime = matrixMul(h_P, h_M, h_N);
// ------- perform matrix multiplication on host ---------
hipEventRecord(startTimeCuda, 0);
sequentialMM(h_M, h_N, h_C);
hipEventRecord(stopTimeCuda,0);
hipEventSynchronize(stopTimeCuda);
hipEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda);
printf("HostTime: %f\n", msTime_seq);
/********************************************************************************************************
	 * IT MAKES NO SENSE TO VERIFY THE CORRECTNESS OF THE RESULT ON THE HOST, SEE SECTIONS 3.2 THROUGH 6.0 AT THE FOLLOWING LINK:
* http://docs.nvidia.com/cuda/floating-point/
********************************************************************************************************
// verify the result
for (int i = 0; i < DIM * DIM; ++i) {
if (h_C[i] != h_P[i]) {
printf("\x1b[31mError\x1b[0m into result: h_C[%d] = %f != %f = h_P[%d]\n", i, h_C[i], h_P[i], i);
goto Error;
}
}
*/
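	/*
	 * A possible alternative (sketch only, not part of the original exercise): compare the host and
	 * GPU results with a relative tolerance instead of bitwise equality, e.g.
	 *
	 *   const float eps = 1e-3f;  // tolerance chosen purely for illustration
	 *   for (int i = 0; i < DIM * DIM; ++i)
	 *       if (fabsf(h_C[i] - h_P[i]) > eps * fmaxf(fabsf(h_C[i]), 1.0f)) goto Error;
	 */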
// Free host memory
free(h_M);
free(h_N);
free(h_P);
free(h_C);
printf("Ok multiplication completed with \x1b[32msuccess\x1b[0m!\n\n");
printf("Speedup: %f\n", msTime_seq/msTime);
return 0;
Error:
free(h_M);
free(h_N);
free(h_P);
free(h_C);
return -1;
}
| 0f7dbb861e42b19251b8ee22e008400cee37f031.cu | /*
* EXAMPLE OF SQUARE MATRIX MULTIPLICATION CHAPTER 4
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <math.h>
#define CHECK_ERROR(call) { \
cudaError_t err = call; \
if (err != cudaSuccess) { \
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define TILE_WIDTH 16
#define DIM 1024
__global__
void matrixMulKernel(float *P, float *M, float *N) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH*2];
int tx = threadIdx.x, bx = blockIdx.x;
int ty = threadIdx.y, by = blockIdx.y;
// identify row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
if ( Row < DIM && Col < DIM ) {
float pValue = 0;
float pValue2 = 0;
// Loop over the d_M and d_N tiles required to compute the d_P element
for (int ph = 0; ph < DIM/TILE_WIDTH; ph++) {
			// Collaborative loading of d_M and d_N tiles into the shared memory
Mds[ty][tx] = M[Row * DIM + ph * TILE_WIDTH + tx];
Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * DIM + Col];
Nds[ty][tx+TILE_WIDTH] = N[(ph * TILE_WIDTH + ty) * DIM + Col + (DIM/2)];
// printf("ph = %d; block[%d,%d]; thread[%d,%d] --> Nds[0][%d] = %2.2f\n", ph, blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, tx, Nds[0][tx]);
__syncthreads();
for(int k = 0; k < TILE_WIDTH; k++){
//printf("ph = %d; block[%d,%d]; thread[%d,%d] --> %2.2f + %2.2f * %2.2f\n", ph, blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, pValue, Mds[ty][k], Nds[k][tx]);
pValue += Mds[ty][k] * Nds[k][tx];
pValue2 += Mds[ty][k] * Nds[k][tx+TILE_WIDTH];
}
__syncthreads();
}
P[Row*DIM+Col] = pValue;
P[Row*DIM+Col + (DIM/2)] = pValue2;
}
}
float matrixMul(float *h_P, float *h_M, float *h_N) {
	int size = (DIM*DIM)*sizeof(float); // assume square matrices
float *d_M, *d_N, *d_P;
//1. Allocate global memory on the device for d_M, d_N and d_P
	// With this type of allocation it isn't possible to access elements using higher-dimensional
	// indexing syntax; the index needs to be linearized first.
CHECK_ERROR(cudaMalloc((void**)&d_M, size));
CHECK_ERROR(cudaMalloc((void**)&d_N, size));
CHECK_ERROR(cudaMalloc((void**)&d_P, size));
// copy h_M and h_N to device memory
cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
cudaEvent_t startTimeCuda, stopTimeCuda;
cudaEventCreate(&startTimeCuda);
cudaEventCreate(&stopTimeCuda);
//2. Kernel launch code - with TILE_WIDTH^2 threads per block
cudaEventRecord(startTimeCuda, 0);
dim3 dimGrid(ceil((DIM/TILE_WIDTH)/2.0), ceil(DIM/TILE_WIDTH), 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
matrixMulKernel<<<dimGrid, dimBlock>>>(d_P, d_M, d_N);
cudaEventRecord(stopTimeCuda,0);
//3. copy d_P from the device memory
cudaEventSynchronize(stopTimeCuda);
float msTime;
cudaEventElapsedTime(&msTime, startTimeCuda, stopTimeCuda);
printf("KernelTime: %f\n", msTime);
cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost);
// Free device matricies
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
return msTime;
}
void sequentialMM(float* h_M, float* h_N, float* h_C) {
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
float sum = 0.0;
for (int k = 0; k < DIM; ++k)
sum += h_M[i * DIM + k] * h_N[k * DIM + j];
h_C[i * DIM + j] = sum;
}
}
}
int main(int argc, char *argv[]) {
float *h_M, *h_N, *h_P, *h_C;
float msTime, msTime_seq;
cudaEvent_t startTimeCuda, stopTimeCuda;
cudaEventCreate(&startTimeCuda);
cudaEventCreate(&stopTimeCuda);
h_M = (float*)malloc(sizeof(float)*DIM*DIM);
h_N = (float*)malloc(sizeof(float)*DIM*DIM);
h_P = (float*)malloc(sizeof(float)*DIM*DIM);
h_C = (float*)malloc(sizeof(float)*DIM*DIM);
// fill M and N with float numbers
srand(time(NULL));
for (int i = 0; i < DIM ; i++) {
for (int j = 0; j < DIM ; j++) {
h_M[i*DIM+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
h_N[i*DIM+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
			h_C[i*DIM+j] = 0.0;
}
}
// perform matrix multiplication
msTime = matrixMul(h_P, h_M, h_N);
// ------- perform matrix multiplication on host ---------
cudaEventRecord(startTimeCuda, 0);
sequentialMM(h_M, h_N, h_C);
cudaEventRecord(stopTimeCuda,0);
cudaEventSynchronize(stopTimeCuda);
cudaEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda);
printf("HostTime: %f\n", msTime_seq);
/********************************************************************************************************
	 * IT MAKES NO SENSE TO VERIFY THE CORRECTNESS OF THE RESULT ON THE HOST, SEE SECTIONS 3.2 THROUGH 6.0 AT THE FOLLOWING LINK:
* http://docs.nvidia.com/cuda/floating-point/
********************************************************************************************************
// verify the result
for (int i = 0; i < DIM * DIM; ++i) {
if (h_C[i] != h_P[i]) {
printf("\x1b[31mError\x1b[0m into result: h_C[%d] = %f != %f = h_P[%d]\n", i, h_C[i], h_P[i], i);
goto Error;
}
}
*/
// Free host memory
free(h_M);
free(h_N);
free(h_P);
free(h_C);
printf("Ok multiplication completed with \x1b[32msuccess\x1b[0m!\n\n");
printf("Speedup: %f\n", msTime_seq/msTime);
return 0;
Error:
free(h_M);
free(h_N);
free(h_P);
free(h_C);
return -1;
}
|
cfdea82d355605dd98dd129ad054cccb4759364c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* AES.cpp
*
* The Advanced Encryption Standard (AES, aka AES) block cipher,
* designed by J. Daemen and V. Rijmen.
*
* @author Paulo S. L. M. Barreto, Simon Waloschek, Benedikt Krueger
*
* This software is hereby placed in the public domain.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#ifdef BENCHMARK
#include <stdio.h>
#include <time.h>
#endif
#include "AES.h"
#include "AES.tab"
#define FULL_UNROLL
#ifdef _MSC_VER
#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
#define GETWORD(p) SWAP(*((uint *)(p)))
#define PUTWORD(ct, st) (*((uint *)(ct)) = SWAP((st)))
#else
#define GETWORD(pt) (((uint)(pt)[0] << 24) ^ ((uint)(pt)[1] << 16) ^ ((uint)(pt)[2] << 8) ^ ((uint)(pt)[3]))
#define PUTWORD(ct, st) ((ct)[0] = (byte)((st) >> 24), (ct)[1] = (byte)((st) >> 16), (ct)[2] = (byte)((st) >> 8), (ct)[3] = (byte)(st), (st))
#endif
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////
AES::AES() {
hipMalloc((void**)&ce_sched, sizeof(e_sched));
hipMalloc((void**)&cd_sched, sizeof(d_sched));
}
AES::~AES() {
Nr = 0;
memset(e_sched, 0, sizeof(e_sched));
memset(d_sched, 0, sizeof(d_sched));
hipFree(ce_sched);
hipFree(cd_sched);
}
//////////////////////////////////////////////////////////////////////
// Support methods
//////////////////////////////////////////////////////////////////////
void AES::ExpandKey(const byte *cipherKey, uint keyBits) {
uint *rek = e_sched;
uint i = 0;
uint temp;
rek[0] = GETWORD(cipherKey );
rek[1] = GETWORD(cipherKey + 4);
rek[2] = GETWORD(cipherKey + 8);
rek[3] = GETWORD(cipherKey + 12);
if (keyBits == 128) {
for (;;) {
temp = rek[3];
rek[4] = rek[0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rek[5] = rek[1] ^ rek[4];
rek[6] = rek[2] ^ rek[5];
rek[7] = rek[3] ^ rek[6];
if (++i == 10) {
Nr = 10;
return;
}
rek += 4;
}
}
rek[4] = GETWORD(cipherKey + 16);
rek[5] = GETWORD(cipherKey + 20);
if (keyBits == 192) {
for (;;) {
temp = rek[ 5];
rek[ 6] = rek[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rek[ 7] = rek[ 1] ^ rek[ 6];
rek[ 8] = rek[ 2] ^ rek[ 7];
rek[ 9] = rek[ 3] ^ rek[ 8];
if (++i == 8) {
Nr = 12;
return;
}
rek[10] = rek[ 4] ^ rek[ 9];
rek[11] = rek[ 5] ^ rek[10];
rek += 6;
}
}
rek[6] = GETWORD(cipherKey + 24);
rek[7] = GETWORD(cipherKey + 28);
if (keyBits == 256) {
for (;;) {
temp = rek[ 7];
rek[ 8] = rek[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rek[ 9] = rek[ 1] ^ rek[ 8];
rek[10] = rek[ 2] ^ rek[ 9];
rek[11] = rek[ 3] ^ rek[10];
if (++i == 7) {
Nr = 14;
return;
}
temp = rek[11];
rek[12] = rek[ 4] ^
(Te4[(temp >> 24) ] & 0xff000000) ^
(Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(temp ) & 0xff] & 0x000000ff);
rek[13] = rek[ 5] ^ rek[12];
rek[14] = rek[ 6] ^ rek[13];
rek[15] = rek[ 7] ^ rek[14];
rek += 8;
}
}
Nr = 0; // this should never happen
}
void AES::InvertKey() {
uint *rek = e_sched;
uint *rdk = d_sched;
assert(Nr == 10 || Nr == 12 || Nr == 14);
rek += 4*Nr;
/* apply the inverse MixColumn transform to all round keys but the first and the last: */
memcpy(rdk, rek, 16);
rdk += 4;
rek -= 4;
for (uint r = 1; r < Nr; r++) {
rdk[0] =
Td0[Te4[(rek[0] >> 24) ] & 0xff] ^
Td1[Te4[(rek[0] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[0] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[0] ) & 0xff] & 0xff];
rdk[1] =
Td0[Te4[(rek[1] >> 24) ] & 0xff] ^
Td1[Te4[(rek[1] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[1] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[1] ) & 0xff] & 0xff];
rdk[2] =
Td0[Te4[(rek[2] >> 24) ] & 0xff] ^
Td1[Te4[(rek[2] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[2] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[2] ) & 0xff] & 0xff];
rdk[3] =
Td0[Te4[(rek[3] >> 24) ] & 0xff] ^
Td1[Te4[(rek[3] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[3] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[3] ) & 0xff] & 0xff];
rdk += 4;
rek -= 4;
}
memcpy(rdk, rek, 16);
}
//////////////////////////////////////////////////////////////////////
// Public Interface
//////////////////////////////////////////////////////////////////////
void AES::byte2int(const byte *b, uint *i) {
i[0] = GETWORD(b );
i[1] = GETWORD(b + 4);
i[2] = GETWORD(b + 8);
i[3] = GETWORD(b + 12);
}
void AES::int2byte(const uint *i, byte *b) {
PUTWORD(b , i[0]);
PUTWORD(b + 4, i[1]);
PUTWORD(b + 8, i[2]);
PUTWORD(b + 12, i[3]);
}
void AES::makeKey(const byte *cipherKey, uint keySize, uint dir) {
switch (keySize) {
case 16:
case 24:
case 32:
keySize <<= 3;
break;
case 128:
case 192:
case 256:
break;
default:
throw "Invalid AES key size";
}
assert(dir <= DIR_BOTH);
if (dir != DIR_NONE) {
ExpandKey(cipherKey, keySize);
hipMemcpy(ce_sched, e_sched, sizeof(e_sched), hipMemcpyHostToDevice);
if (dir & DIR_DECRYPT) {
InvertKey();
hipMemcpy(cd_sched, d_sched, sizeof(e_sched), hipMemcpyHostToDevice);
}
}
}
void AES::encrypt(const uint *pt, uint *ct, uint n = 1) {
uint *cpt, *cct;
uint size = (n << 2)*sizeof(uint);
hipMalloc((void**)&cpt, size);
hipMalloc((void**)&cct, size);
hipMemcpy(cpt, pt, size, hipMemcpyHostToDevice);
struct hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
uint blocks, threads = 1;
if(n != 1) {
threads = (n < prop.maxThreadsPerBlock*2) ? n / 2 : prop.maxThreadsPerBlock;
}
blocks = n / threads;
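    // One thread handles one 16-byte block: pack up to maxThreadsPerBlock threads per block and
    // split the n blocks across the grid (the launch assumes n divides evenly by the thread count).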
dim3 dimBlock(threads);
dim3 dimGrid(blocks);
hipLaunchKernelGGL(( AES_encrypt), dim3(dimGrid), dim3(dimBlock), size, 0, cpt, cct, ce_sched, Nr);
hipMemcpy(ct, cct, size, hipMemcpyDeviceToHost);
hipFree(cpt);
hipFree(cct);
}
void AES::decrypt(const uint *ct, uint *pt, uint n = 1) {
uint *cpt, *cct;
uint size = (n << 2)*sizeof(uint);
hipMalloc((void**)&cpt, size);
hipMalloc((void**)&cct, size);
hipMemcpy(cct, ct, size, hipMemcpyHostToDevice);
struct hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
uint blocks, threads = 1;
if(n != 1) {
threads = (n < prop.maxThreadsPerBlock*2) ? n / 2 : prop.maxThreadsPerBlock;
}
blocks = n / threads;
dim3 dimBlock(threads);
dim3 dimGrid(blocks);
hipLaunchKernelGGL(( AES_decrypt), dim3(dimGrid), dim3(dimBlock), size, 0, cct, cpt, cd_sched, Nr);
hipMemcpy(pt, cpt, size, hipMemcpyDeviceToHost);
hipFree(cpt);
hipFree(cct);
}
__global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int i = x + y * gridDim.x * blockDim.x;
int offset = i << 2;
    // Per-thread round state kept in registers; a __shared__ copy would be raced on by every
    // thread in the block when more than one thread is launched per block.
    uint s0, s1, s2, s3, t0, t1, t2, t3;
s0 = pt[offset + 0] ^ rek[0];
s1 = pt[offset + 1] ^ rek[1];
s2 = pt[offset + 2] ^ rek[2];
s3 = pt[offset + 3] ^ rek[3];
/* round 1: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[ 4];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[ 5];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[ 6];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[ 7];
/* round 2: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[ 8];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[ 9];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[10];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[11];
/* round 3: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[12];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[13];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[14];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[15];
/* round 4: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[16];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[17];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[18];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[19];
/* round 5: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[20];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[21];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[22];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[23];
/* round 6: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[24];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[25];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[26];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[27];
/* round 7: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[28];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[29];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[30];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[31];
/* round 8: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[32];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[33];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[34];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[35];
/* round 9: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[36];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[37];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[38];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[39];
if (Nr > 10) {
/* round 10: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[40];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[41];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[42];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[43];
/* round 11: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[44];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[45];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[46];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[47];
if (Nr > 12) {
/* round 12: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[48];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[49];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[50];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[51];
/* round 13: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[52];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[53];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[54];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[55];
}
}
rek += Nr << 2;
ct[offset + 0] =
(cTe4[(t0 >> 24) ] & 0xff000000) ^
(cTe4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t3 ) & 0xff] & 0x000000ff) ^
rek[0];
ct[offset + 1] =
(cTe4[(t1 >> 24) ] & 0xff000000) ^
(cTe4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t0 ) & 0xff] & 0x000000ff) ^
rek[1];
ct[offset + 2] =
(cTe4[(t2 >> 24) ] & 0xff000000) ^
(cTe4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t1 ) & 0xff] & 0x000000ff) ^
rek[2];
ct[offset + 3] =
(cTe4[(t3 >> 24) ] & 0xff000000) ^
(cTe4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t2 ) & 0xff] & 0x000000ff) ^
rek[3];
}
__global__ void AES_decrypt(const uint *ct, uint *pt, uint *rdk, uint Nr) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int i = x + y * gridDim.x * blockDim.x;
int offset = i << 2;
    // Per-thread round state kept in registers (see the note in AES_encrypt).
    uint s0, s1, s2, s3, t0, t1, t2, t3;
s0 = ct[offset + 0] ^ rdk[0];
s1 = ct[offset + 1] ^ rdk[1];
s2 = ct[offset + 2] ^ rdk[2];
s3 = ct[offset + 3] ^ rdk[3];
/* round 1: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[ 4];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[ 5];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[ 6];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[ 7];
/* round 2: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[ 8];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[ 9];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[10];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[11];
/* round 3: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[12];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[13];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[14];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[15];
/* round 4: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[16];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[17];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[18];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[19];
/* round 5: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[20];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[21];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[22];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[23];
/* round 6: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[24];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[25];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[26];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[27];
/* round 7: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[28];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[29];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[30];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[31];
/* round 8: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[32];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[33];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[34];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[35];
/* round 9: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[36];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[37];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[38];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[39];
if (Nr > 10) {
/* round 10: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[40];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[41];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[42];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[43];
/* round 11: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[44];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[45];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[46];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[47];
if (Nr > 12) {
/* round 12: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[48];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[49];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[50];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[51];
/* round 13: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[52];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[53];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[54];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[55];
}
}
rdk += Nr << 2;
pt[offset + 0] =
(cTd4[(t0 >> 24) ] & 0xff000000) ^
(cTd4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t1 ) & 0xff] & 0x000000ff) ^
rdk[0];
pt[offset + 1] =
(cTd4[(t1 >> 24) ] & 0xff000000) ^
(cTd4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t2 ) & 0xff] & 0x000000ff) ^
rdk[1];
pt[offset + 2] =
(cTd4[(t2 >> 24) ] & 0xff000000) ^
(cTd4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t3 ) & 0xff] & 0x000000ff) ^
rdk[2];
pt[offset + 3] =
(cTd4[(t3 >> 24) ] & 0xff000000) ^
(cTd4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t0 ) & 0xff] & 0x000000ff) ^
rdk[3];
}
| cfdea82d355605dd98dd129ad054cccb4759364c.cu | /**
* AES.cpp
*
* The Advanced Encryption Standard (AES, aka AES) block cipher,
* designed by J. Daemen and V. Rijmen.
*
* @author Paulo S. L. M. Barreto, Simon Waloschek, Benedikt Krueger
*
* This software is hereby placed in the public domain.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#ifdef BENCHMARK
#include <stdio.h>
#include <time.h>
#endif
#include "AES.h"
#include "AES.tab"
#define FULL_UNROLL
#ifdef _MSC_VER
#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
#define GETWORD(p) SWAP(*((uint *)(p)))
#define PUTWORD(ct, st) (*((uint *)(ct)) = SWAP((st)))
#else
#define GETWORD(pt) (((uint)(pt)[0] << 24) ^ ((uint)(pt)[1] << 16) ^ ((uint)(pt)[2] << 8) ^ ((uint)(pt)[3]))
#define PUTWORD(ct, st) ((ct)[0] = (byte)((st) >> 24), (ct)[1] = (byte)((st) >> 16), (ct)[2] = (byte)((st) >> 8), (ct)[3] = (byte)(st), (st))
#endif
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////
AES::AES() {
cudaMalloc((void**)&ce_sched, sizeof(e_sched));
cudaMalloc((void**)&cd_sched, sizeof(d_sched));
}
AES::~AES() {
Nr = 0;
memset(e_sched, 0, sizeof(e_sched));
memset(d_sched, 0, sizeof(d_sched));
cudaFree(ce_sched);
cudaFree(cd_sched);
}
//////////////////////////////////////////////////////////////////////
// Support methods
//////////////////////////////////////////////////////////////////////
void AES::ExpandKey(const byte *cipherKey, uint keyBits) {
uint *rek = e_sched;
uint i = 0;
uint temp;
rek[0] = GETWORD(cipherKey );
rek[1] = GETWORD(cipherKey + 4);
rek[2] = GETWORD(cipherKey + 8);
rek[3] = GETWORD(cipherKey + 12);
if (keyBits == 128) {
for (;;) {
temp = rek[3];
rek[4] = rek[0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rek[5] = rek[1] ^ rek[4];
rek[6] = rek[2] ^ rek[5];
rek[7] = rek[3] ^ rek[6];
if (++i == 10) {
Nr = 10;
return;
}
rek += 4;
}
}
rek[4] = GETWORD(cipherKey + 16);
rek[5] = GETWORD(cipherKey + 20);
if (keyBits == 192) {
for (;;) {
temp = rek[ 5];
rek[ 6] = rek[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rek[ 7] = rek[ 1] ^ rek[ 6];
rek[ 8] = rek[ 2] ^ rek[ 7];
rek[ 9] = rek[ 3] ^ rek[ 8];
if (++i == 8) {
Nr = 12;
return;
}
rek[10] = rek[ 4] ^ rek[ 9];
rek[11] = rek[ 5] ^ rek[10];
rek += 6;
}
}
rek[6] = GETWORD(cipherKey + 24);
rek[7] = GETWORD(cipherKey + 28);
if (keyBits == 256) {
for (;;) {
temp = rek[ 7];
rek[ 8] = rek[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rek[ 9] = rek[ 1] ^ rek[ 8];
rek[10] = rek[ 2] ^ rek[ 9];
rek[11] = rek[ 3] ^ rek[10];
if (++i == 7) {
Nr = 14;
return;
}
temp = rek[11];
rek[12] = rek[ 4] ^
(Te4[(temp >> 24) ] & 0xff000000) ^
(Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(temp ) & 0xff] & 0x000000ff);
rek[13] = rek[ 5] ^ rek[12];
rek[14] = rek[ 6] ^ rek[13];
rek[15] = rek[ 7] ^ rek[14];
rek += 8;
}
}
Nr = 0; // this should never happen
}
void AES::InvertKey() {
uint *rek = e_sched;
uint *rdk = d_sched;
assert(Nr == 10 || Nr == 12 || Nr == 14);
rek += 4*Nr;
/* apply the inverse MixColumn transform to all round keys but the first and the last: */
memcpy(rdk, rek, 16);
rdk += 4;
rek -= 4;
for (uint r = 1; r < Nr; r++) {
rdk[0] =
Td0[Te4[(rek[0] >> 24) ] & 0xff] ^
Td1[Te4[(rek[0] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[0] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[0] ) & 0xff] & 0xff];
rdk[1] =
Td0[Te4[(rek[1] >> 24) ] & 0xff] ^
Td1[Te4[(rek[1] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[1] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[1] ) & 0xff] & 0xff];
rdk[2] =
Td0[Te4[(rek[2] >> 24) ] & 0xff] ^
Td1[Te4[(rek[2] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[2] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[2] ) & 0xff] & 0xff];
rdk[3] =
Td0[Te4[(rek[3] >> 24) ] & 0xff] ^
Td1[Te4[(rek[3] >> 16) & 0xff] & 0xff] ^
Td2[Te4[(rek[3] >> 8) & 0xff] & 0xff] ^
Td3[Te4[(rek[3] ) & 0xff] & 0xff];
rdk += 4;
rek -= 4;
}
memcpy(rdk, rek, 16);
}
//////////////////////////////////////////////////////////////////////
// Public Interface
//////////////////////////////////////////////////////////////////////
void AES::byte2int(const byte *b, uint *i) {
i[0] = GETWORD(b );
i[1] = GETWORD(b + 4);
i[2] = GETWORD(b + 8);
i[3] = GETWORD(b + 12);
}
void AES::int2byte(const uint *i, byte *b) {
PUTWORD(b , i[0]);
PUTWORD(b + 4, i[1]);
PUTWORD(b + 8, i[2]);
PUTWORD(b + 12, i[3]);
}
void AES::makeKey(const byte *cipherKey, uint keySize, uint dir) {
switch (keySize) {
case 16:
case 24:
case 32:
keySize <<= 3;
break;
case 128:
case 192:
case 256:
break;
default:
throw "Invalid AES key size";
}
assert(dir <= DIR_BOTH);
if (dir != DIR_NONE) {
ExpandKey(cipherKey, keySize);
cudaMemcpy(ce_sched, e_sched, sizeof(e_sched), cudaMemcpyHostToDevice);
if (dir & DIR_DECRYPT) {
InvertKey();
cudaMemcpy(cd_sched, d_sched, sizeof(e_sched), cudaMemcpyHostToDevice);
}
}
}
void AES::encrypt(const uint *pt, uint *ct, uint n = 1) {
uint *cpt, *cct;
uint size = (n << 2)*sizeof(uint);
cudaMalloc((void**)&cpt, size);
cudaMalloc((void**)&cct, size);
cudaMemcpy(cpt, pt, size, cudaMemcpyHostToDevice);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
uint blocks, threads = 1;
if(n != 1) {
threads = (n < prop.maxThreadsPerBlock*2) ? n / 2 : prop.maxThreadsPerBlock;
}
blocks = n / threads;
dim3 dimBlock(threads);
dim3 dimGrid(blocks);
AES_encrypt<<<dimGrid, dimBlock, size>>>(cpt, cct, ce_sched, Nr);
cudaMemcpy(ct, cct, size, cudaMemcpyDeviceToHost);
cudaFree(cpt);
cudaFree(cct);
}
void AES::decrypt(const uint *ct, uint *pt, uint n = 1) {
uint *cpt, *cct;
uint size = (n << 2)*sizeof(uint);
cudaMalloc((void**)&cpt, size);
cudaMalloc((void**)&cct, size);
cudaMemcpy(cct, ct, size, cudaMemcpyHostToDevice);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
uint blocks, threads = 1;
if(n != 1) {
threads = (n < prop.maxThreadsPerBlock*2) ? n / 2 : prop.maxThreadsPerBlock;
}
blocks = n / threads;
dim3 dimBlock(threads);
dim3 dimGrid(blocks);
AES_decrypt<<<dimGrid, dimBlock, size>>>(cct, cpt, cd_sched, Nr);
cudaMemcpy(pt, cpt, size, cudaMemcpyDeviceToHost);
cudaFree(cpt);
cudaFree(cct);
}
__global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int i = x + y * gridDim.x * blockDim.x;
int offset = i << 2;
__shared__ __device__ uint s0, s1, s2, s3, t0, t1, t2, t3;
s0 = pt[offset + 0] ^ rek[0];
s1 = pt[offset + 1] ^ rek[1];
s2 = pt[offset + 2] ^ rek[2];
s3 = pt[offset + 3] ^ rek[3];
/* round 1: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[ 4];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[ 5];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[ 6];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[ 7];
/* round 2: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[ 8];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[ 9];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[10];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[11];
/* round 3: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[12];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[13];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[14];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[15];
/* round 4: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[16];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[17];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[18];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[19];
/* round 5: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[20];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[21];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[22];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[23];
/* round 6: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[24];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[25];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[26];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[27];
/* round 7: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[28];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[29];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[30];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[31];
/* round 8: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[32];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[33];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[34];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[35];
/* round 9: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[36];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[37];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[38];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[39];
if (Nr > 10) {
/* round 10: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[40];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[41];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[42];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[43];
/* round 11: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[44];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[45];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[46];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[47];
if (Nr > 12) {
/* round 12: */
s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[48];
s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[49];
s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[50];
s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[51];
/* round 13: */
t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[52];
t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[53];
t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[54];
t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[55];
}
}
rek += Nr << 2;
ct[offset + 0] =
(cTe4[(t0 >> 24) ] & 0xff000000) ^
(cTe4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t3 ) & 0xff] & 0x000000ff) ^
rek[0];
ct[offset + 1] =
(cTe4[(t1 >> 24) ] & 0xff000000) ^
(cTe4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t0 ) & 0xff] & 0x000000ff) ^
rek[1];
ct[offset + 2] =
(cTe4[(t2 >> 24) ] & 0xff000000) ^
(cTe4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t1 ) & 0xff] & 0x000000ff) ^
rek[2];
ct[offset + 3] =
(cTe4[(t3 >> 24) ] & 0xff000000) ^
(cTe4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(cTe4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(cTe4[(t2 ) & 0xff] & 0x000000ff) ^
rek[3];
}
__global__ void AES_decrypt(const uint *ct, uint *pt, uint *rdk, uint Nr) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int i = x + y * gridDim.x * blockDim.x;
int offset = i << 2;
__shared__ __device__ uint s0, s1, s2, s3, t0, t1, t2, t3;
s0 = ct[offset + 0] ^ rdk[0];
s1 = ct[offset + 1] ^ rdk[1];
s2 = ct[offset + 2] ^ rdk[2];
s3 = ct[offset + 3] ^ rdk[3];
/* round 1: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[ 4];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[ 5];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[ 6];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[ 7];
/* round 2: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[ 8];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[ 9];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[10];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[11];
/* round 3: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[12];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[13];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[14];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[15];
/* round 4: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[16];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[17];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[18];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[19];
/* round 5: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[20];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[21];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[22];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[23];
/* round 6: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[24];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[25];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[26];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[27];
/* round 7: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[28];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[29];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[30];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[31];
/* round 8: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[32];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[33];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[34];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[35];
/* round 9: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[36];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[37];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[38];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[39];
if (Nr > 10) {
/* round 10: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[40];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[41];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[42];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[43];
/* round 11: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[44];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[45];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[46];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[47];
if (Nr > 12) {
/* round 12: */
s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[48];
s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[49];
s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[50];
s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[51];
/* round 13: */
t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[52];
t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[53];
t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[54];
t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[55];
}
}
rdk += Nr << 2;
pt[offset + 0] =
(cTd4[(t0 >> 24) ] & 0xff000000) ^
(cTd4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t1 ) & 0xff] & 0x000000ff) ^
rdk[0];
pt[offset + 1] =
(cTd4[(t1 >> 24) ] & 0xff000000) ^
(cTd4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t2 ) & 0xff] & 0x000000ff) ^
rdk[1];
pt[offset + 2] =
(cTd4[(t2 >> 24) ] & 0xff000000) ^
(cTd4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t3 ) & 0xff] & 0x000000ff) ^
rdk[2];
pt[offset + 3] =
(cTd4[(t3 >> 24) ] & 0xff000000) ^
(cTd4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(cTd4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(cTd4[(t0 ) & 0xff] & 0x000000ff) ^
rdk[3];
}
|
40f8059c97022b965945cae4c4fdecc4d52a5fca.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define TILE_WIDTH 32
// kernel
__global__ void tiledMultiplyMatricesKernel(float* d_x, float* d_y, float* d_z, int m, int n, int p)
{
__shared__ float tile_x[TILE_WIDTH][TILE_WIDTH];
__shared__ float tile_y[TILE_WIDTH][TILE_WIDTH];
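	// Note: each loop iteration below stages one TILE_WIDTH x TILE_WIDTH tile of d_x and
	// d_y in shared memory and accumulates a partial dot product. There are no bounds
	// checks, so m, n and p are assumed to be multiples of TILE_WIDTH (true for the
	// 4096 x 4096 matrices used in main).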
// indexing variables
int rowNum = blockIdx.y * blockDim.y + threadIdx.y;
int colNum = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
for(int i = 0; i < n / TILE_WIDTH; ++i)
{
tile_x[threadIdx.y][threadIdx.x] = d_x[rowNum * n + i * TILE_WIDTH + threadIdx.x];
tile_y[threadIdx.y][threadIdx.x] = d_y[(i * TILE_WIDTH + threadIdx.y) * p + colNum];
__syncthreads();
for(int j = 0; j < TILE_WIDTH; ++j)
{
result += tile_x[threadIdx.y][j] * tile_y[j][threadIdx.x];
}
__syncthreads();
}
// write output
d_z[rowNum * p + colNum] = result;
}
// CUDA error checking
void errorCheck(unsigned int line)
{
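	// Note: errorCheck(__LINE__) is always invoked on the line directly after a CUDA API
	// call or kernel launch, so "line - 1" below points at the failing call.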
	hipError_t err = hipGetLastError();
	if(err != hipSuccess)
	{
		printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// host function containing kernel call
void multiplyMatrices(float* x, float* y, float* z, int m, int n, int p)
{
dim3 numOfBlocks(ceil(p / (float) TILE_WIDTH), ceil(m / (float) TILE_WIDTH), 1);
dim3 numOfThreads(TILE_WIDTH, TILE_WIDTH, 1);
size_t bytes_x = m * n * sizeof(float);
size_t bytes_y = n * p * sizeof(float);
size_t bytes_z = m * p * sizeof(float);
float* d_x;
float* d_y;
float* d_z;
hipMalloc((void**) &d_x, bytes_x);
errorCheck(__LINE__);
hipMalloc((void**) &d_y, bytes_y);
errorCheck(__LINE__);
hipMalloc((void**) &d_z, bytes_z);
errorCheck(__LINE__);
hipMemcpy(d_x, x, bytes_x, hipMemcpyHostToDevice);
errorCheck(__LINE__);
hipMemcpy(d_y, y, bytes_y, hipMemcpyHostToDevice);
errorCheck(__LINE__);
hipLaunchKernelGGL(( tiledMultiplyMatricesKernel), dim3(numOfBlocks), dim3(numOfThreads), 0, 0, d_x, d_y, d_z, m, n, p);
errorCheck(__LINE__);
hipMemcpy(z, d_z, bytes_z, hipMemcpyDeviceToHost);
errorCheck(__LINE__);
hipFree(d_x);
errorCheck(__LINE__);
hipFree(d_y);
errorCheck(__LINE__);
hipFree(d_z);
errorCheck(__LINE__);
}
int main()
{
struct timespec start, end;
srand(time(NULL));
size_t m = 4096;
size_t n = 4096;
size_t p = 4096;
float* x = (float*) malloc(m * n * sizeof(float));
float* y = (float*) malloc(n * p * sizeof(float));
float* z = (float*) malloc(m * p * sizeof(float));
for(int i = 0; i < m * n; ++i)
{
x[i] = rand() % 129 - 64;
}
for(int i = 0; i < n * p; ++i)
{
y[i] = rand() % 129 - 64;
}
clock_gettime(CLOCK_REALTIME, &start);
// do matrix multiplication
multiplyMatrices(x, y, z, m, n, p);
clock_gettime(CLOCK_REALTIME, &end);
time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
	printf("Execution time: %ld microseconds.\n", (long) execTime);
return 0;
}
| 40f8059c97022b965945cae4c4fdecc4d52a5fca.cu | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define TILE_WIDTH 32
// kernel
__global__ void tiledMultiplyMatricesKernel(float* d_x, float* d_y, float* d_z, int m, int n, int p)
{
__shared__ float tile_x[TILE_WIDTH][TILE_WIDTH];
__shared__ float tile_y[TILE_WIDTH][TILE_WIDTH];
// indexing variables
int rowNum = blockIdx.y * blockDim.y + threadIdx.y;
int colNum = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
for(int i = 0; i < n / TILE_WIDTH; ++i)
{
tile_x[threadIdx.y][threadIdx.x] = d_x[rowNum * n + i * TILE_WIDTH + threadIdx.x];
tile_y[threadIdx.y][threadIdx.x] = d_y[(i * TILE_WIDTH + threadIdx.y) * p + colNum];
__syncthreads();
for(int j = 0; j < TILE_WIDTH; ++j)
{
result += tile_x[threadIdx.y][j] * tile_y[j][threadIdx.x];
}
__syncthreads();
}
// write output
d_z[rowNum * p + colNum] = result;
}
// CUDA error checking
void errorCheck(unsigned int line)
{
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
}
// host function containing kernel call
void multiplyMatrices(float* x, float* y, float* z, int m, int n, int p)
{
dim3 numOfBlocks(ceil(p / (float) TILE_WIDTH), ceil(m / (float) TILE_WIDTH), 1);
dim3 numOfThreads(TILE_WIDTH, TILE_WIDTH, 1);
size_t bytes_x = m * n * sizeof(float);
size_t bytes_y = n * p * sizeof(float);
size_t bytes_z = m * p * sizeof(float);
float* d_x;
float* d_y;
float* d_z;
cudaMalloc((void**) &d_x, bytes_x);
errorCheck(__LINE__);
cudaMalloc((void**) &d_y, bytes_y);
errorCheck(__LINE__);
cudaMalloc((void**) &d_z, bytes_z);
errorCheck(__LINE__);
cudaMemcpy(d_x, x, bytes_x, cudaMemcpyHostToDevice);
errorCheck(__LINE__);
cudaMemcpy(d_y, y, bytes_y, cudaMemcpyHostToDevice);
errorCheck(__LINE__);
tiledMultiplyMatricesKernel<<<numOfBlocks, numOfThreads>>>(d_x, d_y, d_z, m, n, p);
errorCheck(__LINE__);
cudaMemcpy(z, d_z, bytes_z, cudaMemcpyDeviceToHost);
errorCheck(__LINE__);
cudaFree(d_x);
errorCheck(__LINE__);
cudaFree(d_y);
errorCheck(__LINE__);
cudaFree(d_z);
errorCheck(__LINE__);
}
int main()
{
struct timespec start, end;
srand(time(NULL));
size_t m = 4096;
size_t n = 4096;
size_t p = 4096;
float* x = (float*) malloc(m * n * sizeof(float));
float* y = (float*) malloc(n * p * sizeof(float));
float* z = (float*) malloc(m * p * sizeof(float));
for(int i = 0; i < m * n; ++i)
{
x[i] = rand() % 129 - 64;
}
for(int i = 0; i < n * p; ++i)
{
y[i] = rand() % 129 - 64;
}
clock_gettime(CLOCK_REALTIME, &start);
// do matrix multiplication
multiplyMatrices(x, y, z, m, n, p);
clock_gettime(CLOCK_REALTIME, &end);
time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
	printf("Execution time: %ld microseconds.\n", (long) execTime);
return 0;
}
|
89dddd385794c3933bd49d8ed44c28a4ceb8d007.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl2.cu, normal z -> c, Tue Aug 30 09:38:32 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
clascl2_full(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
clascl2_lower(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
clascl2_upper(int m, int n, const float *D, magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_clascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_clascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( clascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( clascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( clascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
}
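/* Illustrative usage sketch (not part of the original MAGMA source; m, n, dD, dA,
   ldda and queue are assumed to be set up by the caller):

       magma_int_t info;
       // dD: device vector of length m, dA: m-by-n device matrix, leading dimension ldda
       magmablas_clascl2_q( MagmaFull, m, n, dD, dA, ldda, queue, &info );
       // on return, row i of dA has been multiplied by dD[i]; info < 0 flags a bad argument
*/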
| 89dddd385794c3933bd49d8ed44c28a4ceb8d007.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl2.cu, normal z -> c, Tue Aug 30 09:38:32 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
clascl2_full(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
clascl2_lower(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
clascl2_upper(int m, int n, const float *D, magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_clascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_clascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
clascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
clascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
clascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
}
|
36444a06d2eec7dff3a28e951e18d8b8623810c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
MultiKernel has two branches: one taken by the parent process, the rest by the child
processes. They meet at a barrier, which can be pictured like this:
============================
process0 | process1 process2
----------|-------------------------
x=1 | y=2
proBarrier proBarrier proBarrier
----------|-------------------------
print(y=2)| print(x=1)
proBarrier proBarrier proBarrier
----------|-------------------------
proBarrier proBarrier proBarrier
----------|-------------------------
==================================
As shown above, the barrier is the point where the processes line up; it guarantees
that code below a barrier can see the data the other processes wrote above it
(the data itself is exchanged through shared memory).
*/
#include<cassert>
#include<cstdio>
#include<iostream>
#include<helper_cuda.h>
using namespace std;
#ifdef __linux
#include<unistd.h>
#include<sched.h>
#include<sys/mman.h>
#include<sys/wait.h>
#include<sys/syscall.h>
#include<linux/version.h>
#endif
//-------------value,datatype
#define MAX_DEVICES 8
#define PROCESSES_PRE_DEVICE 1
#define DATA_BUF_SIZE 4096
typedef unsigned long lu;
typedef struct ipcDevices_st{
int count;
int ordinals[MAX_DEVICES];
}ipcDevices_t;
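// Note: sense-reversing barrier state shared by all processes via mmap:
// count = processes that have arrived, sense = flips once per barrier round,
// allExit = error flag telling the spinning processes to exit.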
typedef struct ipcBarrier_st{
int count;
bool sense;
bool allExit;
}ipcBarrier_t;
typedef struct ipcCUDA_st{
int device;
pid_t pid;
hipIpcEventHandle_t eventHandle;
hipIpcMemHandle_t memHandle;
}ipcCUDA_t;
ipcBarrier_t* g_barrier = nullptr;
bool g_procSense; // initialized to false
int g_processCount;
//-----------function
pid_t gettid(void){
return syscall(SYS_gettid);
}
inline bool IsAppBuiltAs64(){
return sizeof(void*) == 8;
}
void getDeviceCount(ipcDevices_t * devices){
	// initializing CUDA before fork() corrupts the driver context in the children,
	// so fork first and touch the CUDA runtime only inside the child
	pid_t pid = fork();
	if(0 == pid){ // fork() returns 0 in the child process
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
		//cout is not thread-safe, use printf
		printf("Detecting multiple GPUs...\n");
		checkCudaErrors(hipGetDeviceCount(&count));
		printf("Number of CUDA-capable devices: %i\n",count);
		printf("Searching for UVA capable devices...\n");
for(i=0; i<count; i++){
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop,i));
if(prop.unifiedAddressing){
uvaOrdinals[uvaCount++] = i;
printf("> GPU %d = %15s is capable of UVA\n",i,prop.name);
}
if(prop.computeMode != hipComputeModeDefault){
				printf("> GPU devices must be in Compute Mode Default\n");
				printf("> please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if(uvaCount < 2){
devices->count = uvaCount; // 0 or 1
exit(EXIT_SUCCESS);
}
		// check whether peer-to-peer access is supported
		printf("Checking whether the GPUs support peer to peer memory access\n");
devices->count=1;
int canAccessPeer_0i, canAccessPeer_i0;
for(i = 1; i<uvaCount; i++){
checkCudaErrors(hipDeviceCanAccessPeer(&canAccessPeer_0i,
uvaOrdinals[0],
uvaOrdinals[i]));
checkCudaErrors(hipDeviceCanAccessPeer(&canAccessPeer_i0,
uvaOrdinals[i],
uvaOrdinals[0]));
if(canAccessPeer_0i*canAccessPeer_i0){
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU:[%d] and GPU:[%d], YES\n",
devices->ordinals[0],
devices->ordinals[devices->count++]);
}
exit(EXIT_SUCCESS);
}
}else{
		// the parent just waits for the child to finish
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
void proBarrier(int index){
	// __sync_add_and_fetch gives an atomic add on the shared counter;
	// it works across processes here because g_barrier lives in shared (mmap'ed) memory
	int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
	printf("Process %d: tid=[%lu], pid=%lu, barrier count %d\n",index, (lu)gettid(),(lu)getpid(),newCount);
	if(newCount == g_processCount){ // last process to arrive: reset the barrier
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
	}else{// not the last process: sense still equals g_procSense, so wait at the barrier
while(g_barrier->sense == g_procSense)
			if(!g_barrier->allExit){// while the barrier is not released, spin without hogging the CPU
// sched_yield() causes the calling thread to relinquish the CPU. The
// thread is moved to the end of the queue for its static priority and a
// new thread gets to run.
sched_yield();
}else
exit(EXIT_SUCCESS);
}
g_procSense = !g_procSense;
}
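// Note: each child process p launches this kernel on GPU 0 with num = p+1 and dst
// pointing at slice p of the shared buffer, so the parent can later verify that
// slot i of slice p equals h_refData[i]/(p+1).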
__global__ void
simpleKernel(int *dst, int *src, int num){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
dst[idx] = src[idx]/num;
}
void MultiKernel(ipcCUDA_t *s_mem, int index){
	//1 - process 0 loads the reference buffer into GPU 0's memory
	//2 - the other processes use P2P to launch a kernel on GPU 0
	//3 - process 0 checks the results
int *d_ptr;//device
int h_refData[DATA_BUF_SIZE];//host
for(int i=0; i<DATA_BUF_SIZE; i++)
h_refData[i] = rand();
checkCudaErrors(hipSetDevice(s_mem[index].device));
	if(0 == index){// parent process
		printf("\nParent process, preparing to run the kernels\n");
int h_results[DATA_BUF_SIZE*MAX_DEVICES*PROCESSES_PRE_DEVICE];
hipEvent_t event[MAX_DEVICES*PROCESSES_PRE_DEVICE];
checkCudaErrors(hipMalloc((void**)&d_ptr,
DATA_BUF_SIZE*g_processCount*sizeof(int) ));
//device Ipc
//Gets an interprocess memory handle for an existing device memory allocation.
		// IpcGet exports the allocation of the creating process through an IPC handle
checkCudaErrors(hipIpcGetMemHandle( (hipIpcMemHandle_t*)&s_mem[0].memHandle,
(void*)d_ptr ));
checkCudaErrors(hipMemcpy( (void*)d_ptr,
(void*)h_refData,
DATA_BUF_SIZE*sizeof(int),
hipMemcpyHostToDevice ));
		// b.2: barrier so that the child processes (else branch) finish creating their event handles,
		// i.e. hipEventCreate has filled s_mem[i].eventHandle in every other process
proBarrier(index);
for(int i=1; i<g_processCount; i++){
			//IpcOpen imports the event handle created by the other process
checkCudaErrors(hipIpcOpenEventHandle(&event[i],
s_mem[i].eventHandle));
}
		//b.3: wait until every process has launched its kernel and recorded its event
proBarrier(index);
for(int i=1; i<g_processCount; i++)
checkCudaErrors(hipEventSynchronize(event[i]));
//b.5
proBarrier(index);
checkCudaErrors(hipMemcpy(h_results,
d_ptr+DATA_BUF_SIZE,
DATA_BUF_SIZE*(g_processCount-1)*sizeof(int),
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_ptr));
//----------------------------
		printf("Checking whether the results are correct\n");
for(int p=1; p<g_processCount; p++){
for(int i=0; i<DATA_BUF_SIZE; i++){
if(h_refData[i]/(p+1) != h_results[(p-1)*DATA_BUF_SIZE+i]){
				printf("Mismatch: index %d, process %d, reference %i, result %i\n",
i, p, h_refData[i], h_results[(p-1)*DATA_BUF_SIZE+i]);
g_barrier->allExit = true;
exit(EXIT_SUCCESS);
}
}
}
printf("Result: Pass\n");
}else{
hipEvent_t event;
checkCudaErrors(hipEventCreate(&event, hipEventDisableTiming | hipEventInterprocess));
		// IpcGet exports this process's event handle into the shared memory block
checkCudaErrors(hipIpcGetEventHandle( (hipIpcEventHandle_t*)&s_mem[index].eventHandle,
event ));
		//b.1: wait for process 0 to initialize the device memory;
		// from the child's point of view this step needs the parent to finish the device allocation
proBarrier(index);
		// IpcOpen imports the memory handle that process 0 placed in shared memory
checkCudaErrors(hipIpcOpenMemHandle((void**)&d_ptr,
s_mem[0].memHandle,
hipIpcMemLazyEnablePeerAccess));
		printf("> Process %3d: running a kernel on GPU %d, reading/writing data of process %d (GPU %d)\n",
index, s_mem[index].device, 0, s_mem[0].device);
dim3 const threads(512,1);
dim3 const block(DATA_BUF_SIZE/threads.x, 1);
hipLaunchKernelGGL(( simpleKernel), dim3(block), dim3(threads), 0, 0, d_ptr+index*DATA_BUF_SIZE,
d_ptr,
index+1);
checkCudaErrors(hipEventRecord(event));
//b.4
proBarrier(index);
// Close memory mapped with hipIpcOpenMemHandle.
checkCudaErrors(hipIpcCloseMemHandle(d_ptr));
		//b.6: wait until every child process is done using its event
proBarrier(index);
checkCudaErrors(hipEventDestroy(event));
}
hipDeviceReset();
}
int
main(int argc, char *argv[]){
//---------------------
#if CUDART_VERSION >= 4010 && defined(__linux)
if(!IsAppBuiltAs64()){
	cout<<argv[0]<<" is only supported on 64-bit Linux OS and the app \
must be built as a 64-bit target."<<endl;
exit(EXIT_FAILURE);
}
cout<<"CUDA Version is OK"<<endl;
#else
	cout<<"CUDA 4.1 or newer is required"<<endl;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	cout<<argv[0]<<" only supports the Linux OS kernel version 2.6.18 and higher"<<endl;
exit(EXIT_FAILURE);
#endif
	//---------------------- IPC setup
ipcDevices_t* s_devices = static_cast<ipcDevices_t*>(mmap(NULL,sizeof(*s_devices),
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0, 0 ));
assert(MAP_FAILED != s_devices);
	// CUDA must not be initialized before fork() because the multi-process work comes later,
	// so spawn a helper process first and query the devices from there
getDeviceCount(s_devices);
if(s_devices->count < 1){
		cout<<"Need a GPU with SM version greater than 2"<<endl;
exit(EXIT_SUCCESS);
}else if(s_devices->count > 1)
g_processCount = PROCESSES_PRE_DEVICE * s_devices->count;
else
		g_processCount = 2;// with only one device, still start 2 processes
g_barrier = static_cast<ipcBarrier_t*>(mmap(NULL,sizeof(*g_barrier),
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0, 0 ));
assert(MAP_FAILED != g_barrier);
memset((void*)g_barrier, 0, sizeof(*g_barrier));
// local barrier sense flag
g_procSense = 0;
	// shared memory for CUDA memory and event handles
ipcCUDA_t* s_mem = static_cast<ipcCUDA_t*>(mmap(NULL, g_processCount*sizeof(*s_mem),
PROT_READ | PROT_WRITE,
MAP_SHARED| MAP_ANONYMOUS, 0, 0));
assert(MAP_FAILED != s_mem);
// shared memory
memset((void*)s_mem, 0, g_processCount*sizeof(*s_mem));
cout<<"Spawning processes and assigning GPUs..."<<endl;
//index = 0,...,g_processCount-1
int index=0;
	// fork g_processCount-1 additional processes
for(int i=1; i<g_processCount; i++){
pid_t pid = fork();
// On success, the PID of the child process is returned in the parent,
// and 0 is returned in the child. On failure, -1 is returned in the
// parent, no child process is created, and errno is set appropriately
		if(0 == pid){// in the child, index keeps this child's ordinal (1..g_processCount-1)
index = i;
break;
}else{
			s_mem[i].pid = pid; // parent: remember the child's pid
}
}
	// distribute the UVA-capable devices over the processes (one device per process);
	// if there is only one device, both processes use it;
	// each process writes only its own slot of the shared memory
if(s_devices->count >1)
s_mem[index].device = s_devices->ordinals[index / PROCESSES_PRE_DEVICE];
else
s_mem[index].device = s_mem[1].device = s_devices->ordinals[0];
	cout<<"> Process (0 = parent, others = children) "<<index<<" -> GPU "<<s_mem[index].device<<endl;
MultiKernel(s_mem, index);
	// the parent waits for (joins) all the children
if(index == 0){
for(int i=1; i<g_processCount; i++){
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(!status);
}
cout<<"Shutting down..."<<endl;
exit(EXIT_SUCCESS);
}
}
| 36444a06d2eec7dff3a28e951e18d8b8623810c3.cu | /*
MultiKernel has two branches: one taken by the parent process, the rest by the child
processes. They meet at a barrier, which can be pictured like this:
============================
process0 | process1 process2
----------|-------------------------
x=1 | y=2
proBarrier proBarrier proBarrier
----------|-------------------------
print(y=2)| print(x=1)
proBarrier proBarrier proBarrier
----------|-------------------------
proBarrier proBarrier proBarrier
----------|-------------------------
==================================
As shown above, the barrier is the point where the processes line up; it guarantees
that code below a barrier can see the data the other processes wrote above it
(the data itself is exchanged through shared memory).
*/
#include<cassert>
#include<cstdio>
#include<iostream>
#include<helper_cuda.h>
using namespace std;
#ifdef __linux
#include<unistd.h>
#include<sched.h>
#include<sys/mman.h>
#include<sys/wait.h>
#include<sys/syscall.h>
#include<linux/version.h>
#endif
//-------------value,datatype
#define MAX_DEVICES 8
#define PROCESSES_PRE_DEVICE 1
#define DATA_BUF_SIZE 4096
typedef unsigned long lu;
typedef struct ipcDevices_st{
int count;
int ordinals[MAX_DEVICES];
}ipcDevices_t;
typedef struct ipcBarrier_st{
int count;
bool sense;
bool allExit;
}ipcBarrier_t;
typedef struct ipcCUDA_st{
int device;
pid_t pid;
cudaIpcEventHandle_t eventHandle;
cudaIpcMemHandle_t memHandle;
}ipcCUDA_t;
ipcBarrier_t* g_barrier = nullptr;
bool g_procSense; // initialized to false
int g_processCount;
//-----------function
pid_t gettid(void){
return syscall(SYS_gettid);
}
inline bool IsAppBuiltAs64(){
return sizeof(void*) == 8;
}
void getDeviceCount(ipcDevices_t * devices){
	// initializing CUDA before fork() corrupts the driver context in the children,
	// so fork first and touch the CUDA runtime only inside the child
pid_t pid = fork();
	if(0 == pid){ // fork() returns 0 in the child process
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
		//cout is not thread-safe, use printf
		printf("Detecting multiple GPUs...\n");
		checkCudaErrors(cudaGetDeviceCount(&count));
		printf("Number of CUDA-capable devices: %i\n",count);
		printf("Searching for UVA capable devices...\n");
for(i=0; i<count; i++){
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop,i));
if(prop.unifiedAddressing){
uvaOrdinals[uvaCount++] = i;
printf("> GPU %d = %15s is capable of UVA\n",i,prop.name);
}
if(prop.computeMode != cudaComputeModeDefault){
				printf("> GPU devices must be in Compute Mode Default\n");
				printf("> please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if(uvaCount < 2){
devices->count = uvaCount; // 0 or 1
exit(EXIT_SUCCESS);
}
		// check whether peer-to-peer access is supported
		printf("Checking whether the GPUs support peer to peer memory access\n");
devices->count=1;
int canAccessPeer_0i, canAccessPeer_i0;
for(i = 1; i<uvaCount; i++){
checkCudaErrors(cudaDeviceCanAccessPeer(&canAccessPeer_0i,
uvaOrdinals[0],
uvaOrdinals[i]));
checkCudaErrors(cudaDeviceCanAccessPeer(&canAccessPeer_i0,
uvaOrdinals[i],
uvaOrdinals[0]));
if(canAccessPeer_0i*canAccessPeer_i0){
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU:[%d] and GPU:[%d], YES\n",
devices->ordinals[0],
devices->ordinals[devices->count++]);
}
exit(EXIT_SUCCESS);
}
}else{
		// the parent just waits for the child to finish
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
void proBarrier(int index){
	// __sync_add_and_fetch gives an atomic add on the shared counter;
	// it works across processes here because g_barrier lives in shared (mmap'ed) memory
	int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
	printf("Process %d: tid=[%lu], pid=%lu, barrier count %d\n",index, (lu)gettid(),(lu)getpid(),newCount);
	if(newCount == g_processCount){ // last process to arrive: reset the barrier
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
	}else{// not the last process: sense still equals g_procSense, so wait at the barrier
while(g_barrier->sense == g_procSense)
			if(!g_barrier->allExit){// while the barrier is not released, spin without hogging the CPU
// sched_yield() causes the calling thread to relinquish the CPU. The
// thread is moved to the end of the queue for its static priority and a
// new thread gets to run.
sched_yield();
}else
exit(EXIT_SUCCESS);
}
g_procSense = !g_procSense;
}
__global__ void
simpleKernel(int *dst, int *src, int num){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
dst[idx] = src[idx]/num;
}
void MultiKernel(ipcCUDA_t *s_mem, int index){
	//1 - process 0 loads the reference buffer into GPU 0's memory
	//2 - the other processes use P2P to launch a kernel on GPU 0
	//3 - process 0 checks the results
int *d_ptr;//device
int h_refData[DATA_BUF_SIZE];//host
for(int i=0; i<DATA_BUF_SIZE; i++)
h_refData[i] = rand();
checkCudaErrors(cudaSetDevice(s_mem[index].device));
	if(0 == index){// parent process
		printf("\nParent process, preparing to run the kernels\n");
int h_results[DATA_BUF_SIZE*MAX_DEVICES*PROCESSES_PRE_DEVICE];
cudaEvent_t event[MAX_DEVICES*PROCESSES_PRE_DEVICE];
checkCudaErrors(cudaMalloc((void**)&d_ptr,
DATA_BUF_SIZE*g_processCount*sizeof(int) ));
//device Ipc
//Gets an interprocess memory handle for an existing device memory allocation.
		// IpcGet exports the allocation of the creating process through an IPC handle
checkCudaErrors(cudaIpcGetMemHandle( (cudaIpcMemHandle_t*)&s_mem[0].memHandle,
(void*)d_ptr ));
checkCudaErrors(cudaMemcpy( (void*)d_ptr,
(void*)h_refData,
DATA_BUF_SIZE*sizeof(int),
cudaMemcpyHostToDevice ));
		// b.2: barrier so that the child processes (else branch) finish creating their event handles,
		// i.e. cudaEventCreate has filled s_mem[i].eventHandle in every other process
proBarrier(index);
for(int i=1; i<g_processCount; i++){
			//IpcOpen imports the event handle created by the other process
checkCudaErrors(cudaIpcOpenEventHandle(&event[i],
s_mem[i].eventHandle));
}
		//b.3: wait until every process has launched its kernel and recorded its event
proBarrier(index);
for(int i=1; i<g_processCount; i++)
checkCudaErrors(cudaEventSynchronize(event[i]));
//b.5
proBarrier(index);
checkCudaErrors(cudaMemcpy(h_results,
d_ptr+DATA_BUF_SIZE,
DATA_BUF_SIZE*(g_processCount-1)*sizeof(int),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_ptr));
//----------------------------
		printf("Checking whether the results are correct\n");
for(int p=1; p<g_processCount; p++){
for(int i=0; i<DATA_BUF_SIZE; i++){
if(h_refData[i]/(p+1) != h_results[(p-1)*DATA_BUF_SIZE+i]){
				printf("Mismatch: index %d, process %d, reference %i, result %i\n",
i, p, h_refData[i], h_results[(p-1)*DATA_BUF_SIZE+i]);
g_barrier->allExit = true;
exit(EXIT_SUCCESS);
}
}
}
printf("Result: Pass\n");
}else{
cudaEvent_t event;
checkCudaErrors(cudaEventCreate(&event, cudaEventDisableTiming | cudaEventInterprocess));
		// IpcGet exports this process's event handle into the shared memory block
checkCudaErrors(cudaIpcGetEventHandle( (cudaIpcEventHandle_t*)&s_mem[index].eventHandle,
event ));
		//b.1: wait for process 0 to initialize the device memory;
		// from the child's point of view this step needs the parent to finish the device allocation
proBarrier(index);
		// IpcOpen imports the memory handle that process 0 placed in shared memory
checkCudaErrors(cudaIpcOpenMemHandle((void**)&d_ptr,
s_mem[0].memHandle,
cudaIpcMemLazyEnablePeerAccess));
		printf("> Process %3d: running a kernel on GPU %d, reading/writing data of process %d (GPU %d)\n",
index, s_mem[index].device, 0, s_mem[0].device);
dim3 const threads(512,1);
dim3 const block(DATA_BUF_SIZE/threads.x, 1);
simpleKernel<<<block, threads>>>(d_ptr+index*DATA_BUF_SIZE,
d_ptr,
index+1);
checkCudaErrors(cudaEventRecord(event));
//b.4
proBarrier(index);
// Close memory mapped with cudaIpcOpenMemHandle.
checkCudaErrors(cudaIpcCloseMemHandle(d_ptr));
		//b.6: wait until every child process is done using its event
proBarrier(index);
checkCudaErrors(cudaEventDestroy(event));
}
cudaDeviceReset();
}
int
main(int argc, char *argv[]){
	//--------------------- pre-checks
#if CUDART_VERSION >= 4010 && defined(__linux)
if(!IsAppBuiltAs64()){
	cout<<argv[0]<<" is only supported on 64-bit Linux OS and the app \
must be built as a 64-bit target."<<endl;
exit(EXIT_FAILURE);
}
cout<<"CUDA Version is OK"<<endl;
#else
	cout<<"CUDA 4.1 or newer is required"<<endl;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	cout<<argv[0]<<" only supports the Linux OS kernel version 2.6.18 and higher"<<endl;
exit(EXIT_FAILURE);
#endif
	//---------------------- IPC setup
ipcDevices_t* s_devices = static_cast<ipcDevices_t*>(mmap(NULL,sizeof(*s_devices),
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0, 0 ));
assert(MAP_FAILED != s_devices);
	// CUDA must not be initialized before fork() because the multi-process work comes later,
	// so spawn a helper process first and query the devices from there
getDeviceCount(s_devices);
if(s_devices->count < 1){
		cout<<"Need a GPU with SM version greater than 2"<<endl;
exit(EXIT_SUCCESS);
}else if(s_devices->count > 1)
g_processCount = PROCESSES_PRE_DEVICE * s_devices->count;
else
		g_processCount = 2;// with only one device, still start 2 processes
g_barrier = static_cast<ipcBarrier_t*>(mmap(NULL,sizeof(*g_barrier),
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0, 0 ));
assert(MAP_FAILED != g_barrier);
memset((void*)g_barrier, 0, sizeof(*g_barrier));
	// set the local barrier sense flag
g_procSense = 0;
	// declare shared memory for CUDA memory and event handles
ipcCUDA_t* s_mem = static_cast<ipcCUDA_t*>(mmap(NULL, g_processCount*sizeof(*s_mem),
PROT_READ | PROT_WRITE,
MAP_SHARED| MAP_ANONYMOUS, 0, 0));
assert(MAP_FAILED != s_mem);
	// initialize the shared memory
memset((void*)s_mem, 0, g_processCount*sizeof(*s_mem));
cout<<"Spawning processes and assigning GPUs..."<<endl;
//index = 0,...,g_processCount-1
int index=0;
	// fork g_processCount-1 additional processes
for(int i=1; i<g_processCount; i++){
pid_t pid = fork();
// On success, the PID of the child process is returned in the parent,
// and 0 is returned in the child. On failure, -1 is returned in the
// parent, no child process is created, and errno is set appropriately
		if(0 == pid){// in the child, index keeps this child's ordinal (1..g_processCount-1)
index = i;
break;
}else{
			s_mem[i].pid = pid; // parent: remember the child's pid
}
}
	// distribute the UVA-capable devices over the processes (one device per process);
	// if there is only one device, both processes use it;
	// each process writes only its own slot of the shared memory
if(s_devices->count >1)
s_mem[index].device = s_devices->ordinals[index / PROCESSES_PRE_DEVICE];
else
s_mem[index].device = s_mem[1].device = s_devices->ordinals[0];
	cout<<"> Process (0 = parent, others = children) "<<index<<" -> GPU "<<s_mem[index].device<<endl;
MultiKernel(s_mem, index);
	// the parent waits for (joins) all the children
if(index == 0){
for(int i=1; i<g_processCount; i++){
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(!status);
}
cout<<"Shutting down..."<<endl;
exit(EXIT_SUCCESS);
}
}
|
c6873942eeda4811817265112c72d69f646c345b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 32
#define blockDimX 32
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void transpose(float * A, float * C, int width)
{
__shared__ float shared_0[32][33];
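	// Note: the tile is padded to 33 columns so that the strided read in
	// "sum = shared_0[tidx][...]" below does not hit shared-memory bank conflicts.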
float sum = 0;
{
int it_2;
#pragma unroll
for (it_2=0; it_2<32; it_2=(it_2+1))
{
shared_0[it_2][tidx]=A(((idx+(-1*tidx))+it_2), (coalesced_idy+tidx));
}
__syncthreads();
sum=shared_0[tidx][(idy+(-1*coalesced_idy))];
__syncthreads();
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
| c6873942eeda4811817265112c72d69f646c345b.cu | #define COALESCED_NUM 32
#define blockDimX 32
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void transpose(float * A, float * C, int width)
{
__shared__ float shared_0[32][33];
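// the tile is padded to 33 columns so that column-wise accesses fall into different shared-memory banks (avoids bank conflicts)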
float sum = 0;
{
int it_2;
#pragma unroll
for (it_2=0; it_2<32; it_2=(it_2+1))
{
shared_0[it_2][tidx]=A(((idx+(-1*tidx))+it_2), (coalesced_idy+tidx));
}
__syncthreads();
sum=shared_0[tidx][(idy+(-1*coalesced_idy))];
__syncthreads();
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
|
7c1af0df71636340bd5dca1072edb8cabf62899e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file CUDAUtils.cpp
*
* @license
* Copyright (c) 2009-2013
* Fraunhofer Institute for Algorithms and Scientific Computing SCAI
* for Fraunhofer-Gesellschaft
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* @endlicense
*
* @brief Implementation of CSR utilities with CUDA
* @author Thomas Brandes
* @date 02.07.2012
* @since 1.0.0
*/
#include <lama/LAMAInterface.hpp>
#include <lama/LAMAInterfaceRegistry.hpp>
#include <lama/cuda/utils.cu.h>
#include <lama/cuda/CUDAError.hpp>
#include <lama/cuda/CUDAUtils.hpp>
#include <lama/exception/LAMAAssert.hpp>
// thrust
#include <thrust/device_free.h>
#include <thrust/device_malloc.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
// others
#include <typeinfo>
namespace lama
{
LAMA_LOG_DEF_LOGGER( CUDAUtils::logger, "CUDA.Utils" )
/* ------------------------------------------------------------------------------------------------------------------ */
/* scale */
/* ------------------------------------------------------------------------------------------------------------------ */
template<typename ValueType,typename OtherValueType>
void CUDAUtils::scale( ValueType *mValues, const IndexType n, const OtherValueType value )
{
LAMA_LOG_INFO( logger, "scale, #n = " << n << ", value = " << value )
LAMA_CHECK_CUDA_ACCESS
ValueType castedValue = static_cast<ValueType>( value );
thrust::device_ptr<ValueType> mValuesPtr( const_cast<ValueType*>( mValues ) );
thrust::constant_iterator<ValueType> valueItr( castedValue );
thrust::transform( mValuesPtr, mValuesPtr + n, valueItr, mValuesPtr, thrust::multiplies<ValueType>() );
}
/* --------------------------------------------------------------------------- */
template<typename T>
struct InvalidIndex
{
const T size; //!< size of array for which index is checked
InvalidIndex( T _size ) : size( _size ) {}
__host__ __device__
bool operator()( T y )
{
return y >= size || y < 0;
}
};
/* --------------------------------------------------------------------------- */
bool CUDAUtils::validIndexes( const IndexType array[], const IndexType n, const IndexType size )
{
LAMA_LOG_DEBUG( logger, "validIndexes: array[" << n << "], size " << size )
bool validFlag = true;
if ( n > 0 )
{
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<IndexType> arrayPtr( const_cast<IndexType*> ( array ) );
bool error = false;
// reduce with logical OR to check whether any index is out of range
error = thrust::transform_reduce( arrayPtr,
arrayPtr + n,
InvalidIndex<IndexType>( size ),
false,
thrust::logical_or<bool>() );
if ( error )
{
validFlag = false;
}
}
return validFlag;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::sum( const ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "sum # array = " << array << ", n = " << n )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::reduce( data, data + n, zero, thrust::plus<ValueType>() );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
LAMA_LOG_INFO( logger, "sum of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
void CUDAUtils::setVal( ValueType array[], const IndexType n, const ValueType val )
{
LAMA_LOG_INFO( logger, "setVal # array = " << array << ", n = " << n << ", val = " << val )
LAMA_CHECK_CUDA_ACCESS
if ( n > 0 )
{
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
thrust::fill( data, data + n, val );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
}
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
void CUDAUtils::setOrder( ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "setOrder # array = " << array << ", n = " << n )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> array_ptr( const_cast<ValueType*>( array ) );
thrust::sequence( array_ptr, array_ptr + n );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::getValue( const ValueType* array, const IndexType i )
{
LAMA_LOG_INFO( logger, "getValue # i = " << i )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> arrayPtr( const_cast<ValueType*>( array ) );
thrust::host_vector<ValueType> arrayHost( arrayPtr + i, arrayPtr + i + 1 );
return arrayHost[0];
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::maxval( const ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "maxval for " << n << " elements " )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::reduce( data, data + n, zero, thrust::maximum<ValueType>() );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
LAMA_LOG_INFO( logger, "max of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
template<typename T>
struct absolute_value: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()( const T &x ) const
{
return x < T( 0 ) ? -x : x;
}
};
template<typename ValueType>
ValueType CUDAUtils::absMaxVal( const ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "absMaxVal for " << n << " elements " )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::transform_reduce( data, data + n, absolute_value<ValueType>(), zero,
thrust::maximum<ValueType>() );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
LAMA_LOG_INFO( logger, "abs max of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::absMaxDiffVal( const ValueType array1[], const ValueType array2[], const IndexType n )
{
LAMA_LOG_INFO( logger, "absMaxDiffVal for " << n << " elements " )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data1( const_cast<ValueType*>( array1 ) );
thrust::device_ptr<ValueType> data2( const_cast<ValueType*>( array2 ) );
thrust::device_vector<ValueType> temp( n );
// compute temp = array1 - array2
thrust::transform( data1, data1 + n, data2, temp.begin(), thrust::minus<ValueType>() );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::transform_reduce( temp.begin(), temp.end(), absolute_value<ValueType>(), zero,
thrust::maximum<ValueType>() );
/* Not available, but would be useful:
ValueType result = thrust::transform_reduce( data1, data1 + n,
data2,
thrust::minus<ValueType>(),
zero,
thrust::maximum<ValueType>());
*/
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" )
LAMA_LOG_INFO( logger, "abs max diff of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
// template argument ascending: make two instantiations of kernel to avoid bool test
template<typename ValueType, bool ascending>
__global__
void isSortedKernel( bool* result, const IndexType numValues, const ValueType* values )
{
const int i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < numValues )
{
if ( ascending )
{
result[i] = values[i] <= values[i+1];
}
else
{
result[i] = values[i] >= values[i+1];
}
}
}
template<typename ValueType>
bool CUDAUtils::isSorted( const ValueType array[], const IndexType n, bool ascending )
{
LAMA_LOG_INFO( logger, "isSorted<" << typeid( ValueType ).name()
<< ">, n = " << n << ", ascending = " << ascending )
LAMA_CHECK_CUDA_ACCESS
if ( n < 2 )
{
return true; // 0 or 1 element is always sorted
}
// create a tempory bool array on device with n-1 entries
thrust::device_ptr<bool> resultPtr = thrust::device_malloc<bool>( n - 1 );
bool* resultRawPtr = thrust::raw_pointer_cast( resultPtr );
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n - 1, dimBlock.x );
if ( ascending )
{
hipLaunchKernelGGL(( isSortedKernel<ValueType, true>), dim3(dimGrid), dim3(dimBlock), 0, 0, resultRawPtr, n - 1, array );
}
else
{
hipLaunchKernelGGL(( isSortedKernel<ValueType, false>), dim3(dimGrid), dim3(dimBlock), 0, 0, resultRawPtr, n - 1, array );
}
hipStreamSynchronize( 0 );
LAMA_CHECK_CUDA_ERROR
bool sorted = thrust::reduce( resultPtr, resultPtr + n - 1, true, thrust::logical_and<bool>() );
thrust::device_free( resultPtr ); // release the temporary device buffer
return sorted;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType1,typename ValueType2>
__global__
void gatherKernel( ValueType1* out, const ValueType2* in, const IndexType* indexes, IndexType n )
{
// Kernel also supports implicit type conversions
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < n )
{
out[i] = static_cast<ValueType1>( in[indexes[i]] );
}
}
template<typename ValueType1,typename ValueType2>
void CUDAUtils::setGather( ValueType1 out[], const ValueType2 in[], const IndexType indexes[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"setGather<" << typeid(ValueType1).name() << "," << typeid(ValueType2).name() << ">( ..., n = " << n << ")" )
LAMA_CHECK_CUDA_ACCESS
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
hipLaunchKernelGGL(( gatherKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, out, in, indexes, n );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
}
/* --------------------------------------------------------------------------- */
template<typename T1,typename T2>
__global__
void scatter_kernel( T1* out, const IndexType* indexes, const T2* in, IndexType n )
{
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < n )
{
out[indexes[i]] = in[i];
}
}
template<typename ValueType1,typename ValueType2>
void CUDAUtils::setScatter( ValueType1 out[], const IndexType indexes[], const ValueType2 in[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"setScatter<" << typeid(ValueType1).name() << "," << typeid(ValueType2).name() << ">( ..., n = " << n << ")" )
LAMA_CHECK_CUDA_ACCESS
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
hipLaunchKernelGGL(( scatter_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, out, indexes, in, n );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
}
/* --------------------------------------------------------------------------- */
template<typename T1,typename T2>
__global__
void setKernel( T1* out, const T2* in, IndexType n )
{
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < n )
{
out[i] = static_cast<T1>( in[i] );
}
}
template<typename ValueType1,typename ValueType2>
void CUDAUtils::set( ValueType1 out[], const ValueType2 in[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"set<" << typeid(ValueType1).name() << "," << typeid(ValueType2).name() << ">( ..., n = " << n << ")" )
LAMA_LOG_DEBUG( logger, "out = " << out << ", in = " << in )
LAMA_CHECK_CUDA_ACCESS
if ( n > 0 )
{
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
hipLaunchKernelGGL(( setKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, out, in, n );
LAMA_CUDA_RT_CALL( hipStreamSynchronize( 0 ), "hipStreamSynchronize( 0 )" );
}
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
__global__
void invertVectorComponents_kernel( ValueType* array, IndexType n )
{
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
ValueType one = 1.0;
if ( i < n )
{
array[i] = one / array[i];
}
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
void CUDAUtils::invert( ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"invert Vector components for vector of type " << typeid(ValueType).name() << " and size n = " << n << "." )
LAMA_CHECK_CUDA_ACCESS
if ( n > 0 )
{
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
hipLaunchKernelGGL(( invertVectorComponents_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, array, n );
hipStreamSynchronize( 0 );
LAMA_CHECK_CUDA_ERROR
}
}
/* --------------------------------------------------------------------------- */
/* Template instantiations via registration routine */
/* --------------------------------------------------------------------------- */
void CUDAUtils::setInterface( UtilsInterface& Utils )
{
LAMA_INTERFACE_REGISTER( Utils, validIndexes )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, double, double )
LAMA_INTERFACE_REGISTER_T( Utils, sum, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, sum, float )
LAMA_INTERFACE_REGISTER_T( Utils, sum, double )
LAMA_INTERFACE_REGISTER_T( Utils, setVal, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, setVal, float )
LAMA_INTERFACE_REGISTER_T( Utils, setVal, double )
LAMA_INTERFACE_REGISTER_T( Utils, setOrder, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, getValue, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, getValue, float )
LAMA_INTERFACE_REGISTER_T( Utils, getValue, double )
LAMA_INTERFACE_REGISTER_T( Utils, maxval, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, maxval, float )
LAMA_INTERFACE_REGISTER_T( Utils, maxval, double )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxVal, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxVal, float )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxVal, double )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxDiffVal, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxDiffVal, float )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxDiffVal, double )
LAMA_INTERFACE_REGISTER_T( Utils, isSorted, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, isSorted, float )
LAMA_INTERFACE_REGISTER_T( Utils, isSorted, double )
LAMA_INTERFACE_REGISTER_TT( Utils, set, int, int )
LAMA_INTERFACE_REGISTER_TT( Utils, set, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, set, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, set, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, set, double, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, int, int )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, double, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, int, int )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, double, double )
LAMA_INTERFACE_REGISTER_T( Utils, invert, float )
LAMA_INTERFACE_REGISTER_T( Utils, invert, double )
}
/* --------------------------------------------------------------------------- */
/* Static registration of the Utils routines */
/* --------------------------------------------------------------------------- */
bool CUDAUtils::registerInterface()
{
LAMAInterface& interface = LAMAInterfaceRegistry::getRegistry().modifyInterface( Context::CUDA );
setInterface( interface.Utils );
return true;
}
/* --------------------------------------------------------------------------- */
/* Static initialization at program start */
/* --------------------------------------------------------------------------- */
bool CUDAUtils::initialized = registerInterface();
} // namespace lama
| 7c1af0df71636340bd5dca1072edb8cabf62899e.cu | /**
* @file CUDAUtils.cpp
*
* @license
* Copyright (c) 2009-2013
* Fraunhofer Institute for Algorithms and Scientific Computing SCAI
* for Fraunhofer-Gesellschaft
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* @endlicense
*
* @brief Implementation of CSR utilities with CUDA
* @author Thomas Brandes
* @date 02.07.2012
* @since 1.0.0
*/
#include <lama/LAMAInterface.hpp>
#include <lama/LAMAInterfaceRegistry.hpp>
#include <lama/cuda/utils.cu.h>
#include <lama/cuda/CUDAError.hpp>
#include <lama/cuda/CUDAUtils.hpp>
#include <lama/exception/LAMAAssert.hpp>
// thrust
#include <thrust/device_free.h>
#include <thrust/device_malloc.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
// others
#include <typeinfo>
namespace lama
{
LAMA_LOG_DEF_LOGGER( CUDAUtils::logger, "CUDA.Utils" )
/* ------------------------------------------------------------------------------------------------------------------ */
/* scale */
/* ------------------------------------------------------------------------------------------------------------------ */
template<typename ValueType,typename OtherValueType>
void CUDAUtils::scale( ValueType *mValues, const IndexType n, const OtherValueType value )
{
LAMA_LOG_INFO( logger, "scale, #n = " << n << ", value = " << value )
LAMA_CHECK_CUDA_ACCESS
ValueType castedValue = static_cast<ValueType>( value );
thrust::device_ptr<ValueType> mValuesPtr( const_cast<ValueType*>( mValues ) );
thrust::constant_iterator<ValueType> valueItr( castedValue );
thrust::transform( mValuesPtr, mValuesPtr + n, valueItr, mValuesPtr, thrust::multiplies<ValueType>() );
}
/* --------------------------------------------------------------------------- */
template<typename T>
struct InvalidIndex
{
const T size; //!< size of array for which index is checked
InvalidIndex( T _size ) : size( _size ) {}
__host__ __device__
bool operator()( T y )
{
return y >= size || y < 0;
}
};
/* --------------------------------------------------------------------------- */
bool CUDAUtils::validIndexes( const IndexType array[], const IndexType n, const IndexType size )
{
LAMA_LOG_DEBUG( logger, "validIndexes: array[" << n << "], size " << size )
bool validFlag = true;
if ( n > 0 )
{
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<IndexType> arrayPtr( const_cast<IndexType*> ( array ) );
bool error = false;
// reduce with logical OR to check whether any index is out of range
error = thrust::transform_reduce( arrayPtr,
arrayPtr + n,
InvalidIndex<IndexType>( size ),
false,
thrust::logical_or<bool>() );
if ( error )
{
validFlag = false;
}
}
return validFlag;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::sum( const ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "sum # array = " << array << ", n = " << n )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::reduce( data, data + n, zero, thrust::plus<ValueType>() );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
LAMA_LOG_INFO( logger, "sum of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
void CUDAUtils::setVal( ValueType array[], const IndexType n, const ValueType val )
{
LAMA_LOG_INFO( logger, "setVal # array = " << array << ", n = " << n << ", val = " << val )
LAMA_CHECK_CUDA_ACCESS
if ( n > 0 )
{
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
thrust::fill( data, data + n, val );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
}
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
void CUDAUtils::setOrder( ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "setOrder # array = " << array << ", n = " << n )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> array_ptr( const_cast<ValueType*>( array ) );
thrust::sequence( array_ptr, array_ptr + n );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::getValue( const ValueType* array, const IndexType i )
{
LAMA_LOG_INFO( logger, "getValue # i = " << i )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> arrayPtr( const_cast<ValueType*>( array ) );
thrust::host_vector<ValueType> arrayHost( arrayPtr + i, arrayPtr + i + 1 );
return arrayHost[0];
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::maxval( const ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "maxval for " << n << " elements " )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::reduce( data, data + n, zero, thrust::maximum<ValueType>() );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
LAMA_LOG_INFO( logger, "max of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
template<typename T>
struct absolute_value: public thrust::unary_function<T,T>
{
__host__ __device__
T operator()( const T &x ) const
{
return x < T( 0 ) ? -x : x;
}
};
template<typename ValueType>
ValueType CUDAUtils::absMaxVal( const ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger, "absMaxVal for " << n << " elements " )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data( const_cast<ValueType*>( array ) );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::transform_reduce( data, data + n, absolute_value<ValueType>(), zero,
thrust::maximum<ValueType>() );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
LAMA_LOG_INFO( logger, "abs max of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
ValueType CUDAUtils::absMaxDiffVal( const ValueType array1[], const ValueType array2[], const IndexType n )
{
LAMA_LOG_INFO( logger, "absMaxDiffVal for " << n << " elements " )
LAMA_CHECK_CUDA_ACCESS
thrust::device_ptr<ValueType> data1( const_cast<ValueType*>( array1 ) );
thrust::device_ptr<ValueType> data2( const_cast<ValueType*>( array2 ) );
thrust::device_vector<ValueType> temp( n );
// compute temp = array1 - array2
thrust::transform( data1, data1 + n, data2, temp.begin(), thrust::minus<ValueType>() );
ValueType zero = static_cast<ValueType>( 0 );
ValueType result = thrust::transform_reduce( temp.begin(), temp.end(), absolute_value<ValueType>(), zero,
thrust::maximum<ValueType>() );
/* Not available, but would be useful:
ValueType result = thrust::transform_reduce( data1, data1 + n,
data2,
thrust::minus<ValueType>(),
zero,
thrust::maximum<ValueType>());
*/
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" )
LAMA_LOG_INFO( logger, "abs max diff of " << n << " values = " << result )
return result;
}
/* --------------------------------------------------------------------------- */
// template argument ascending: make two instantiations of kernel to avoid bool test
template<typename ValueType, bool ascending>
__global__
void isSortedKernel( bool* result, const IndexType numValues, const ValueType* values )
{
const int i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < numValues )
{
if ( ascending )
{
result[i] = values[i] <= values[i+1];
}
else
{
result[i] = values[i] >= values[i+1];
}
}
}
template<typename ValueType>
bool CUDAUtils::isSorted( const ValueType array[], const IndexType n, bool ascending )
{
LAMA_LOG_INFO( logger, "isSorted<" << typeid( ValueType ).name()
<< ">, n = " << n << ", ascending = " << ascending )
LAMA_CHECK_CUDA_ACCESS
if ( n < 2 )
{
return true; // 0 or 1 element is always sorted
}
// create a tempory bool array on device with n-1 entries
thrust::device_ptr<bool> resultPtr = thrust::device_malloc<bool>( n - 1 );
bool* resultRawPtr = thrust::raw_pointer_cast( resultPtr );
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n - 1, dimBlock.x );
if ( ascending )
{
isSortedKernel<ValueType, true><<<dimGrid, dimBlock>>> ( resultRawPtr, n - 1, array );
}
else
{
isSortedKernel<ValueType, false><<<dimGrid, dimBlock>>> ( resultRawPtr, n - 1, array );
}
cudaStreamSynchronize( 0 );
LAMA_CHECK_CUDA_ERROR
bool sorted = thrust::reduce( resultPtr, resultPtr + n - 1, true, thrust::logical_and<bool>() );
thrust::device_free( resultPtr ); // release the temporary device buffer
return sorted;
}
/* --------------------------------------------------------------------------- */
template<typename ValueType1,typename ValueType2>
__global__
void gatherKernel( ValueType1* out, const ValueType2* in, const IndexType* indexes, IndexType n )
{
// Kernel also supports implicit type conversions
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < n )
{
out[i] = static_cast<ValueType1>( in[indexes[i]] );
}
}
template<typename ValueType1,typename ValueType2>
void CUDAUtils::setGather( ValueType1 out[], const ValueType2 in[], const IndexType indexes[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"setGather<" << typeid(ValueType1).name() << "," << typeid(ValueType2).name() << ">( ..., n = " << n << ")" )
LAMA_CHECK_CUDA_ACCESS
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
gatherKernel<<<dimGrid,dimBlock>>>( out, in, indexes, n );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
}
/* --------------------------------------------------------------------------- */
template<typename T1,typename T2>
__global__
void scatter_kernel( T1* out, const IndexType* indexes, const T2* in, IndexType n )
{
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < n )
{
out[indexes[i]] = in[i];
}
}
template<typename ValueType1,typename ValueType2>
void CUDAUtils::setScatter( ValueType1 out[], const IndexType indexes[], const ValueType2 in[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"setScatter<" << typeid(ValueType1).name() << "," << typeid(ValueType2).name() << ">( ..., n = " << n << ")" )
LAMA_CHECK_CUDA_ACCESS
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
scatter_kernel<<<dimGrid,dimBlock>>>( out, indexes, in, n );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
}
/* --------------------------------------------------------------------------- */
template<typename T1,typename T2>
__global__
void setKernel( T1* out, const T2* in, IndexType n )
{
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
if ( i < n )
{
out[i] = static_cast<T1>( in[i] );
}
}
template<typename ValueType1,typename ValueType2>
void CUDAUtils::set( ValueType1 out[], const ValueType2 in[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"set<" << typeid(ValueType1).name() << "," << typeid(ValueType2).name() << ">( ..., n = " << n << ")" )
LAMA_LOG_DEBUG( logger, "out = " << out << ", in = " << in )
LAMA_CHECK_CUDA_ACCESS
if ( n > 0 )
{
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
setKernel<<<dimGrid,dimBlock>>>( out, in, n );
LAMA_CUDA_RT_CALL( cudaStreamSynchronize( 0 ), "cudaStreamSynchronize( 0 )" );
}
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
__global__
void invertVectorComponents_kernel( ValueType* array, IndexType n )
{
const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx );
ValueType one = 1.0;
if ( i < n )
{
array[i] = one / array[i];
}
}
/* --------------------------------------------------------------------------- */
template<typename ValueType>
void CUDAUtils::invert( ValueType array[], const IndexType n )
{
LAMA_LOG_INFO( logger,
"invert Vector components for vector of type " << typeid(ValueType).name() << " and size n = " << n << "." )
LAMA_CHECK_CUDA_ACCESS
if ( n > 0 )
{
const int block_size = 256;
dim3 dimBlock( block_size, 1, 1 );
dim3 dimGrid = makeGrid( n, dimBlock.x );
invertVectorComponents_kernel<<<dimGrid,dimBlock>>>( array, n );
cudaStreamSynchronize( 0 );
LAMA_CHECK_CUDA_ERROR
}
}
/* --------------------------------------------------------------------------- */
/* Template instantiations via registration routine */
/* --------------------------------------------------------------------------- */
void CUDAUtils::setInterface( UtilsInterface& Utils )
{
LAMA_INTERFACE_REGISTER( Utils, validIndexes )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, scale, double, double )
LAMA_INTERFACE_REGISTER_T( Utils, sum, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, sum, float )
LAMA_INTERFACE_REGISTER_T( Utils, sum, double )
LAMA_INTERFACE_REGISTER_T( Utils, setVal, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, setVal, float )
LAMA_INTERFACE_REGISTER_T( Utils, setVal, double )
LAMA_INTERFACE_REGISTER_T( Utils, setOrder, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, getValue, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, getValue, float )
LAMA_INTERFACE_REGISTER_T( Utils, getValue, double )
LAMA_INTERFACE_REGISTER_T( Utils, maxval, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, maxval, float )
LAMA_INTERFACE_REGISTER_T( Utils, maxval, double )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxVal, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxVal, float )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxVal, double )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxDiffVal, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxDiffVal, float )
LAMA_INTERFACE_REGISTER_T( Utils, absMaxDiffVal, double )
LAMA_INTERFACE_REGISTER_T( Utils, isSorted, IndexType )
LAMA_INTERFACE_REGISTER_T( Utils, isSorted, float )
LAMA_INTERFACE_REGISTER_T( Utils, isSorted, double )
LAMA_INTERFACE_REGISTER_TT( Utils, set, int, int )
LAMA_INTERFACE_REGISTER_TT( Utils, set, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, set, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, set, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, set, double, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, int, int )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setScatter, double, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, int, int )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, float, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, float, double )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, double, float )
LAMA_INTERFACE_REGISTER_TT( Utils, setGather, double, double )
LAMA_INTERFACE_REGISTER_T( Utils, invert, float )
LAMA_INTERFACE_REGISTER_T( Utils, invert, double )
}
/* --------------------------------------------------------------------------- */
/* Static registration of the Utils routines */
/* --------------------------------------------------------------------------- */
bool CUDAUtils::registerInterface()
{
LAMAInterface& interface = LAMAInterfaceRegistry::getRegistry().modifyInterface( Context::CUDA );
setInterface( interface.Utils );
return true;
}
/* --------------------------------------------------------------------------- */
/* Static initialiazion at program start */
/* --------------------------------------------------------------------------- */
bool CUDAUtils::initialized = registerInterface();
} // namespace lama
|
a4b5fd964c914f2a1183a56047336e08622891ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
/*
template <>
void caffe_gpu_transpose<float>(const int m, const int n, const float* A, float* C) {
const float alpha = 1;
const float beta = 0;
CUBLAS_CHECK(hipblasSgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, &alpha, A, n, &beta, A, n, C, m));
}
template <>
void caffe_gpu_transpose<double>(const int m, const int n, const double* A, double* C) {
const double alpha = 1;
const double beta = 0;
CUBLAS_CHECK(hipblasDgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, &alpha, A, n, &beta, A, n, C, m));
}
*/
template <>
void caffe_gpu_transpose<float>(const int M, const int N,
const float* A, float* C) {
// C is MxN
// Takes as input an M x N matrix A stored in row major order and returns
// the same M x N matrix C stored in column major order
CHECK_NE(A, C);
int lda = N;
int ldb = M;
int ldc = M;
const float alpha = 1.0f;
const float beta = 0.0f;
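// Sgeam computes C = alpha*op(A) + beta*op(B); since beta == 0 the B operand is never read (hence the NULL below), so this call is a pure out-of-place transpose of A into C.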
CUBLAS_CHECK(hipblasSgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N,
M, N, &alpha, A, lda, &beta, NULL, ldb, C, ldc));
}
template <>
void caffe_gpu_transpose<double>(const int M, const int N,
const double* A, double* C) {
// C is MxN
// Takes as input an M x N matrix A stored in row major order and returns
// the same M x N matrix C stored in column major order
CHECK_NE(A, C);
int lda = N;
int ldb = M;
int ldc = M;
const double alpha = 1.0f;
const double beta = 0.0f;
CUBLAS_CHECK(hipblasDgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N,
M, N, &alpha, A, lda, &beta, NULL, ldb, C, ldc));
}
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
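// A row-major C = A*B is evaluated as the column-major product C^T = B^T * A^T, which is why B is passed as the first matrix and A as the second in the gemm call below.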
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
hipStream_t str) {
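// temporarily bind the BLAS handle to the caller's stream for this scal, then restore the previously set stream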
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| a4b5fd964c914f2a1183a56047336e08622891ca.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
/*
template <>
void caffe_gpu_transpose<float>(const int m, const int n, const float* A, float* C) {
const float alpha = 1;
const float beta = 0;
CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_T, m, n, &alpha, A, n, &beta, A, n, C, m));
}
template <>
void caffe_gpu_transpose<double>(const int m, const int n, const double* A, double* C) {
const double alpha = 1;
const double beta = 0;
CUBLAS_CHECK(cublasDgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_T, m, n, &alpha, A, n, &beta, A, n, C, m));
}
*/
template <>
void caffe_gpu_transpose<float>(const int M, const int N,
const float* A, float* C) {
// C is MxN
// Takes as input an M x N matrix A stored in row major order and returns
// the same M x N matrix C stored in column major order
CHECK_NE(A, C);
int lda = N;
int ldb = M;
int ldc = M;
const float alpha = 1.0f;
const float beta = 0.0f;
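// Sgeam computes C = alpha*op(A) + beta*op(B); since beta == 0 the B operand is never read (hence the NULL below), so this call is a pure out-of-place transpose of A into C.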
CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N,
M, N, &alpha, A, lda, &beta, NULL, ldb, C, ldc));
}
template <>
void caffe_gpu_transpose<double>(const int M, const int N,
const double* A, double* C) {
// C is MxN
// Takes as input an M x N matrix A stored in row major order and returns
// the same M x N matrix C stored in column major order
CHECK_NE(A, C);
int lda = N;
int ldb = M;
int ldc = M;
const double alpha = 1.0f;
const double beta = 0.0f;
CUBLAS_CHECK(cublasDgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N,
M, N, &alpha, A, lda, &beta, NULL, ldb, C, ldc));
}
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
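// A row-major C = A*B is evaluated as the column-major product C^T = B^T * A^T, which is why B is passed as the first matrix and A as the second in the gemm call below.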
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cudaStream_t str) {
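// temporarily bind the BLAS handle to the caller's stream for this scal, then restore the previously set stream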
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
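// The element-wise kernels below follow one pattern: a CUDA_KERNEL_LOOP
// grid-stride loop over the N elements, instantiated for float and double.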
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
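// curandGenerateUniform returns samples in (0, 1]; the scale and shift below
// map them to (a, b].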
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
8eb7aa4ee5532f4ee82020638d1e674600439502.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* An example program utilizing most/all calls from the CUDA
* Runtime API module:
*
* Stream Management
*
*/
#include <cuda/runtime_api.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <thread>
using element_t = float;
using clock_value_t = long long;
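// Busy-wait for roughly sleep_cycles device clock cycles; this pads the kernel
// runtime so the per-stream overlap is actually observable.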
__device__ void gpu_sleep(clock_value_t sleep_cycles)
{
clock_value_t start = clock64();
clock_value_t cycles_elapsed;
do { cycles_elapsed = clock64() - start; }
while (cycles_elapsed < sleep_cycles);
}
template <typename T>
__global__ void add(
const T* __restrict__ lhs,
const T* __restrict__ rhs,
T* __restrict__ result,
size_t length)
{
auto global_index = threadIdx.x + blockIdx.x * blockDim.x;
if (global_index < length) {
result[global_index] = lhs[global_index] + rhs[global_index];
gpu_sleep(200000);
}
}
/*
* Produce a launch configuration with one thread covering each element
*/
cuda::launch_configuration_t make_linear_launch_config(
const cuda::device_t device,
size_t length)
{
auto threads_per_block = device.properties().max_threads_per_block();
cuda::grid::dimension_t num_blocks =
(length / threads_per_block) +
(length % threads_per_block == 0 ? 0 : 1);
return cuda::make_launch_config(num_blocks, threads_per_block, cuda::no_dynamic_shared_memory);
}
struct buffer_set_t {
cuda::memory::host::unique_ptr<element_t[]> host_lhs;
cuda::memory::host::unique_ptr<element_t[]> host_rhs;
cuda::memory::host::unique_ptr<element_t[]> host_result;
cuda::memory::device::unique_ptr<element_t[]> device_lhs;
cuda::memory::device::unique_ptr<element_t[]> device_rhs;
cuda::memory::device::unique_ptr<element_t[]> device_result;
};
std::vector<buffer_set_t> generate_buffers(
const cuda::device_t device,
size_t num_kernels,
size_t num_elements)
{
// TODO: This should be an std::array, but generating
// it is a bit tricky and I don't want to burden the example
// with template wizardry
std::vector<buffer_set_t> buffers;
std::generate_n(std::back_inserter(buffers), num_kernels,
[&]() {
return buffer_set_t {
// Sticking to C++11 here...
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements)
};
}
);
// TODO: Consider actually filling the buffers
return buffers;
}
int main(int argc, char **argv)
{
constexpr size_t num_kernels = 5;
constexpr size_t num_elements = 1e7;
auto device = cuda::device::current::get();
std::cout << "Using CUDA device " << device.name() << " (having ID " << device.id() << ")\n";
std::cout << "Generating host buffers... " << std::flush;
auto buffers = generate_buffers(device, num_kernels, num_elements);
std::cout << "done.\n" << std::flush;
std::vector<cuda::stream_t> streams;
streams.reserve(num_kernels);
std::generate_n(std::back_inserter(streams), num_kernels,
[&]() { return device.create_stream(cuda::stream::async); });
auto common_launch_config = make_linear_launch_config(device, num_elements);
auto buffer_size = num_elements * sizeof(element_t);
std::cout
<< "Running " << num_kernels << " sequences of HtoD-kernel-DtoH, in parallel" << std::endl;
// Unfortunately, we need to use indices here - unless we
// had access to a zip iterator (e.g. boost::zip_iterator)
for(size_t k = 0; k < num_kernels; k++) {
auto& stream = streams[k];
auto& buffer_set = buffers[k];
stream.enqueue.copy(buffer_set.device_lhs.get(), buffer_set.host_lhs.get(), buffer_size);
stream.enqueue.copy(buffer_set.device_rhs.get(), buffer_set.host_rhs.get(), buffer_size);
stream.enqueue.kernel_launch(
add<element_t>,
common_launch_config,
buffer_set.device_lhs.get(),
buffer_set.device_rhs.get(),
buffer_set.device_result.get(),
num_elements);
stream.enqueue.copy(buffer_set.host_result.get(), buffer_set.device_result.get(), buffer_size);
stream.enqueue.host_function_call(
[k](cuda::stream_t) {
std::cout
<< "Stream " << k+1 << " of " << num_kernels << " has concluded all work. " << std::endl;
}
);
}
std::this_thread::sleep_for(std::chrono::microseconds(50000));
for(auto& stream : streams) { stream.synchronize(); }
cuda::outstanding_error::ensure_none();
// TODO: Consider checking for correctness here
std::cout << "\nSUCCESS" << std::endl;
}
| 8eb7aa4ee5532f4ee82020638d1e674600439502.cu | /**
* An example program utilizing most/all calls from the CUDA
* Runtime API module:
*
* Stream Management
*
*/
#include <cuda/runtime_api.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <thread>
using element_t = float;
using clock_value_t = long long;
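// Busy-wait for roughly sleep_cycles device clock cycles; this pads the kernel
// runtime so the per-stream overlap is actually observable.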
__device__ void gpu_sleep(clock_value_t sleep_cycles)
{
clock_value_t start = clock64();
clock_value_t cycles_elapsed;
do { cycles_elapsed = clock64() - start; }
while (cycles_elapsed < sleep_cycles);
}
template <typename T>
__global__ void add(
const T* __restrict__ lhs,
const T* __restrict__ rhs,
T* __restrict__ result,
size_t length)
{
auto global_index = threadIdx.x + blockIdx.x * blockDim.x;
if (global_index < length) {
result[global_index] = lhs[global_index] + rhs[global_index];
gpu_sleep(200000);
}
}
/*
* Produce a launch configuration with one thread covering each element
*/
cuda::launch_configuration_t make_linear_launch_config(
const cuda::device_t device,
size_t length)
{
auto threads_per_block = device.properties().max_threads_per_block();
cuda::grid::dimension_t num_blocks =
(length / threads_per_block) +
(length % threads_per_block == 0 ? 0 : 1);
return cuda::make_launch_config(num_blocks, threads_per_block, cuda::no_dynamic_shared_memory);
}
struct buffer_set_t {
cuda::memory::host::unique_ptr<element_t[]> host_lhs;
cuda::memory::host::unique_ptr<element_t[]> host_rhs;
cuda::memory::host::unique_ptr<element_t[]> host_result;
cuda::memory::device::unique_ptr<element_t[]> device_lhs;
cuda::memory::device::unique_ptr<element_t[]> device_rhs;
cuda::memory::device::unique_ptr<element_t[]> device_result;
};
std::vector<buffer_set_t> generate_buffers(
const cuda::device_t device,
size_t num_kernels,
size_t num_elements)
{
// TODO: This should be an std::array, but generating
// it is a bit tricky and I don't want to burden the example
// with template wizardry
std::vector<buffer_set_t> buffers;
std::generate_n(std::back_inserter(buffers), num_kernels,
[&]() {
return buffer_set_t {
// Sticking to C++11 here...
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements)
};
}
);
// TODO: Consider actually filling the buffers
return buffers;
}
int main(int argc, char **argv)
{
constexpr size_t num_kernels = 5;
constexpr size_t num_elements = 1e7;
auto device = cuda::device::current::get();
std::cout << "Using CUDA device " << device.name() << " (having ID " << device.id() << ")\n";
std::cout << "Generating host buffers... " << std::flush;
auto buffers = generate_buffers(device, num_kernels, num_elements);
std::cout << "done.\n" << std::flush;
std::vector<cuda::stream_t> streams;
streams.reserve(num_kernels);
std::generate_n(std::back_inserter(streams), num_kernels,
[&]() { return device.create_stream(cuda::stream::async); });
auto common_launch_config = make_linear_launch_config(device, num_elements);
auto buffer_size = num_elements * sizeof(element_t);
std::cout
<< "Running " << num_kernels << " sequences of HtoD-kernel-DtoH, in parallel" << std::endl;
// Unfortunately, we need to use indices here - unless we
// had access to a zip iterator (e.g. boost::zip_iterator)
for(size_t k = 0; k < num_kernels; k++) {
auto& stream = streams[k];
auto& buffer_set = buffers[k];
stream.enqueue.copy(buffer_set.device_lhs.get(), buffer_set.host_lhs.get(), buffer_size);
stream.enqueue.copy(buffer_set.device_rhs.get(), buffer_set.host_rhs.get(), buffer_size);
stream.enqueue.kernel_launch(
add<element_t>,
common_launch_config,
buffer_set.device_lhs.get(),
buffer_set.device_rhs.get(),
buffer_set.device_result.get(),
num_elements);
stream.enqueue.copy(buffer_set.host_result.get(), buffer_set.device_result.get(), buffer_size);
stream.enqueue.host_function_call(
[k](cuda::stream_t) {
std::cout
<< "Stream " << k+1 << " of " << num_kernels << " has concluded all work. " << std::endl;
}
);
}
std::this_thread::sleep_for(std::chrono::microseconds(50000));
for(auto& stream : streams) { stream.synchronize(); }
cuda::outstanding_error::ensure_none();
// TODO: Consider checking for correctness here
std::cout << "\nSUCCESS" << std::endl;
}
|
6b079cf740679d28df9e841a918eaaf208e00012.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mapAdjacencyToBlockKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
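// Auto-generated benchmark harness: for each matrix size and block shape it
// allocates device buffers, warms up with 10 launches, then times 1000 launches
// of mapAdjacencyToBlockKernel. The timed loop is not followed by a device
// synchronize, so kernels still in flight are not included in the measurement.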
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *adjIndexes = NULL;
hipMalloc(&adjIndexes, XSIZE*YSIZE);
int *adjacency = NULL;
hipMalloc(&adjacency, XSIZE*YSIZE);
int *adjacencyBlockLabel = NULL;
hipMalloc(&adjacencyBlockLabel, XSIZE*YSIZE);
int *blockMappedAdjacency = NULL;
hipMalloc(&blockMappedAdjacency, XSIZE*YSIZE);
int *fineAggregate = NULL;
hipMalloc(&fineAggregate, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mapAdjacencyToBlockKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,adjIndexes,adjacency,adjacencyBlockLabel,blockMappedAdjacency,fineAggregate);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mapAdjacencyToBlockKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,adjIndexes,adjacency,adjacencyBlockLabel,blockMappedAdjacency,fineAggregate);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mapAdjacencyToBlockKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,adjIndexes,adjacency,adjacencyBlockLabel,blockMappedAdjacency,fineAggregate);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6b079cf740679d28df9e841a918eaaf208e00012.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mapAdjacencyToBlockKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
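// Auto-generated benchmark harness: for each matrix size and block shape it
// allocates device buffers, warms up with 10 launches, then times 1000 launches
// of mapAdjacencyToBlockKernel. The timed loop is not followed by a device
// synchronize, so kernels still in flight are not included in the measurement.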
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *adjIndexes = NULL;
cudaMalloc(&adjIndexes, XSIZE*YSIZE);
int *adjacency = NULL;
cudaMalloc(&adjacency, XSIZE*YSIZE);
int *adjacencyBlockLabel = NULL;
cudaMalloc(&adjacencyBlockLabel, XSIZE*YSIZE);
int *blockMappedAdjacency = NULL;
cudaMalloc(&blockMappedAdjacency, XSIZE*YSIZE);
int *fineAggregate = NULL;
cudaMalloc(&fineAggregate, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mapAdjacencyToBlockKernel<<<gridBlock,threadBlock>>>(size,adjIndexes,adjacency,adjacencyBlockLabel,blockMappedAdjacency,fineAggregate);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mapAdjacencyToBlockKernel<<<gridBlock,threadBlock>>>(size,adjIndexes,adjacency,adjacencyBlockLabel,blockMappedAdjacency,fineAggregate);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mapAdjacencyToBlockKernel<<<gridBlock,threadBlock>>>(size,adjIndexes,adjacency,adjacencyBlockLabel,blockMappedAdjacency,fineAggregate);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bc46595debd22e8f55c4b97c3f8fa62fed948447.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
if (tid < stride)
{
g_idata[idx] += g_idata[idx + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
} | bc46595debd22e8f55c4b97c3f8fa62fed948447.cu | #include "includes.h"
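// Interleaved-pair reduction: the stride starts at blockDim.x / 2 and halves each
// iteration; every block reduces its chunk of g_idata in place and thread 0 writes
// the block's partial sum to g_odata[blockIdx.x]. A final reduction over g_odata
// (on the host or in a second kernel) is still required.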
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
if (tid < stride)
{
g_idata[idx] += g_idata[idx + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
} |
f246523fd685ba872e5c718207fa1f0c722b69d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
struct DataBlock{
unsigned char *dev_bitmap;
CPUAnimBitmap *bitmap;
};
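// Forward declarations so main() compiles; generate_frame and kernel are defined below.
// DIM and CPUAnimBitmap are assumed to come from the CUDA-by-Example helper headers
// (cpu_anim.h), which this snippet does not include.
void generate_frame(DataBlock *d, int ticks);
__global__ void kernel(unsigned char *ptr, int ticks);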
// clean up memory allocated on the GPU
void cleanup(DataBlock *d){
hipFree(d->dev_bitmap);
}
int main(void){
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
hipMalloc((void**)&data.dev_bitmap, bitmap.image_size());
bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
}
void generate_frame(DataBlock *d, int ticks){
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(threads), 0, 0, d->dev_bitmap,ticks);
hipMemcpy(d->bitmap->get_ptr(),d->dev_bitmap,d->bitmap->image_size(),hipMemcpyDeviceToHost);
}
__global__ void kernel(unsigned char *ptr, int ticks){
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrt(fx * fx + fy * fy);
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d/10.0f - ticks/7.0f)/(d/10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
}
| f246523fd685ba872e5c718207fa1f0c722b69d4.cu | #include <iostream>
struct DataBlock{
unsigned char *dev_bitmap;
CPUAnimBitmap *bitmap;
};
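// Forward declarations so main() compiles; generate_frame and kernel are defined below.
// DIM and CPUAnimBitmap are assumed to come from the CUDA-by-Example helper headers
// (cpu_anim.h), which this snippet does not include.
void generate_frame(DataBlock *d, int ticks);
__global__ void kernel(unsigned char *ptr, int ticks);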
// clean up memory allocated on the GPU
void cleanup(DataBlock *d){
cudaFree(d->dev_bitmap);
}
int main(void){
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size());
bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
}
void generate_frame(DataBlock *d, int ticks){
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<blocks,threads>>>(d->dev_bitmap,ticks);
cudaMemcpy(d->bitmap->get_ptr(),d->dev_bitmap,d->bitmap->image_size(),cudaMemcpyDeviceToHost);
}
__global__ void kernel(unsigned char *ptr, int ticks){
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrt(fx * fx + fy * fy);
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d/10.0f - ticks/7.0f)/(d/10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
}
|
1bffe04a17b3ada9f62cf0d35a5dc44c3aa619df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <nba/engines/cuda/utils.hh>
#include "IPsecAuthHMACSHA1_kernel.hh"
/* Compatibility definitions. */
#include <nba/engines/cuda/compat.hh>
/* The index is given by the order in get_used_datablocks(). */
#define dbid_enc_payloads_d (0)
#define dbid_flow_ids_d (1)
#define SHA1_THREADS_PER_BLK 32
extern "C" {
//__global__ uint32_t d_pad_buffer[16 * 2 * MAX_CHUNK_SIZE * MAX_GROUP_SIZE];
__device__ uint32_t swap(uint32_t v) {
return ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8)
| ((v & 0x00ff0000U) >> 8) | ((v & 0xff000000U) >> 24);
}
typedef struct hash_digest {
uint32_t h1;
uint32_t h2;
uint32_t h3;
uint32_t h4;
uint32_t h5;
} hash_digest_t;
#define HMAC
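// getBlock loads one 64-byte SHA-1 block from buf at 'offset' as big-endian words and
// applies the SHA-1 padding (0x80 byte plus 64-bit bit length) when the message ends in
// or before this block. With HMAC defined, the encoded length adds 64 bytes for the
// ipad block hashed first.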
__inline__ __device__ void getBlock(char* buf, int offset, int len, uint32_t* dest)
{
uint32_t *tmp;
unsigned int tempbuf[16];
tmp = (uint32_t*) (buf + offset);
//printf("%d %d\n", offset, len);
if (offset + 64 <= len) {
//printf("--0--\n");
#pragma unroll 16
for (int i = 0; i < 16; i++) {
dest[i] = swap(tmp[i]);
}
} else if (len > offset && (len - offset) < 56) { //case 1 enough space in last block for padding
//printf("--1--\n");
int i;
for (i = 0; i < (len - offset) / 4; i++) {
//printf("%d %d\n",offset,i);
//printf("%p %p\n", buf, dest);
//tempbuf[i] = buf[i];
tempbuf[i] = swap(tmp[i]);
}
//printf("len%%4 %d\n",len%4);
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
for (; i < 14; i++) {
tempbuf[i] = 0;
}
#pragma unroll 14
for (i = 0; i < 14; i++) {
dest[i] = tempbuf[i];
}
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (len > offset && (len - offset) >= 56) { //case 2 not enough space in last block (containing message) for padding
//printf("--2--\n");
int i;
for (i = 0; i < (len - offset) / 4; i++) {
tempbuf[i] = swap(tmp[i]);
}
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
for (; i < 16; i++) {
tempbuf[i] = 0x00000000;
}
#pragma unroll 16
for (i = 0; i < 16; i++) {
dest[i] = tempbuf[i];
}
} else if (offset == len) { //message end is aligned in 64 bytes
//printf("--3--\n");
dest[0] = swap(0x00000080);
#pragma unroll 13
for (int i = 1; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (offset > len) { //the last block in case 2
//printf("--4--\n");
#pragma unroll 14
for (int i = 0; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else {
printf("Not supposed to happen\n");
}
}
__device__ void computeSHA1Block(char* in, uint32_t* w, int offset, int len,
hash_digest_t &h) {
uint32_t a = h.h1;
uint32_t b = h.h2;
uint32_t c = h.h3;
uint32_t d = h.h4;
uint32_t e = h.h5;
uint32_t f;
uint32_t k;
uint32_t temp;
getBlock(in, offset, len, w);
//for (int i = 0; i < 16 ; i++) {
// printf("%0X\n", w[i]);
//}
//printf("\n");
k = 0x5A827999;
//0 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//1 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//2 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//3 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
//4 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//5 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//6 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//7 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
//8 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//9 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//10 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//11 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
//12 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//13 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//14 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//15 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//16 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//17 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//18 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//19 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
k = 0x6ED9EBA1;
//20 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//21 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//22 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//23 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
//24 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//25 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//26 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//27 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
//28 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//29 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//30 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//31 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//32 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//33 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//34 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//35 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
//36 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//37 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//38 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//39 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
k = 0x8F1BBCDC;
//40 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//41 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//42 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//43 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
//44 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//45 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//46 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//47 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//48 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//49 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//50 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//51 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
//52 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//53 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//54 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//55 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
//56 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//57 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//58 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//59 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
k = 0xCA62C1D6;
//60 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//61 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//62 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//63 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//64 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//65 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//66 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//67 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//68 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//69 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//70 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//71 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//72 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//73 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//74 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//75 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//76 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//77 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//78 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//79 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
h.h1 += a;
h.h2 += b;
h.h3 += c;
h.h4 += d;
h.h5 += e;
}
/*
__global__ void computeSHA1(char* buf, int *offsets, int *len, char* output, int N)
{
//__shared__ uint32_t w_shared[16*SHA1_THREADS_PER_BLK];
uint32_t w_register[16];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
uint32_t *w = w_register;//w_shared + 16*threadIdx.x;
hash_digest_t h;
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
int num_iter = (len[index]+63+9)/64;
printf("num_iter %d\n", num_iter);
for(int i = 0; i < num_iter; i++)
computeSHA1Block(buf + offsets[index], w, i*64 , len[index], h);
h.h1 = swap(h.h1);
h.h2 = swap(h.h2);
h.h3 = swap(h.h3);
h.h4 = swap(h.h4);
h.h5 = swap(h.h5);
uint32_t * out = (uint32_t*)(output + index*20);
*(out++) = h.h1;
*(out++) = h.h2;
*(out++) = h.h3;
*(out++) = h.h4;
*(out++) = h.h5;
}
}*/
/*
Somehow "*pad = *pad++ ^ *key++" gets mis-optimized and does not work
correctly on the GPU, so the loop below uses explicit indexing instead.
*/
__device__ void xorpads(uint32_t *pad, const uint32_t* key) {
#pragma unroll 16
for (int i = 0; i < 16; i++)
*(pad + i) = *(pad + i) ^ *(key + i);
}
uint32_t opad[16] =
{ 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, };
uint32_t ipad[16] =
{ 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, };
// in: start pointer of the data to be authenticated by hsha1.
// out: start pointer of the data where hsha1 signature will be recorded.
// length: length of the data to be authenticated by hsha1.
// key: hmac key.
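// HMAC-SHA1: out = H((key ^ opad) || H((key ^ ipad) || message)). The first pass
// below hashes the ipad block and the message; the second hashes the opad block
// and the resulting 20-byte inner digest.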
__device__ void HMAC_SHA1(uint32_t *in, uint32_t *out, uint32_t length,
const char *key) {
uint32_t w_register[16];
uint32_t *w = w_register; //w_shared + 16*threadIdx.x;
hash_digest_t h;
for (int i = 0; i < 16; i++)
w[i] = 0x36363636;
xorpads(w, (uint32_t*) (key));
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
//SHA1 compute on ipad
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA1 compute on message
int num_iter = (length + 63 + 9) / 64;
for (int i = 0; i < num_iter; i++)
computeSHA1Block((char*) in, w, i * 64, length, h);
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
for (int i = 0; i < 16; i++)
w[i] = 0x5c5c5c5c;
xorpads(w, (uint32_t*) (key));
//SHA 1 compute on opads
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA 1 compute on (hash of ipad|m)
computeSHA1Block((char*) out, w, 0, 20, h);
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
}
#if 0
__global__ void computeHMAC_SHA1(char* buf, char* keys, uint32_t *offsets,
uint32_t *lengths, uint32_t *outputs, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
printf("index%d threadid%d\n", index, threadIdx.x);
uint32_t offset = offsets[index];
uint32_t length = lengths[index];
uint32_t *out = (uint32_t*) (buf + outputs[index]);
HMAC_SHA1((uint32_t*) (buf + offset), out, length, keys + 64 * index);
}
}
__global__ void computeHMAC_SHA1_2(char* buf, char* keys, uint32_t *offsets,
uint16_t *lengths, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
printf("index%d threadid%d\n", index, threadIdx.x);
uint32_t offset = offsets[index];
uint32_t length = lengths[index];
uint32_t *out = (uint32_t*) (buf + offset + length);
HMAC_SHA1((uint32_t*) (buf + offset), out, length, keys + 64 * index);
}
}
#endif
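// One thread per packet: look up the packet's encrypted payload and flow ID from the
// datablock arguments, fetch the per-flow HMAC key, and write the 20-byte HMAC-SHA1
// digest immediately after the payload. checkbits_d is set per block so the host can
// poll for kernel completion.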
__global__ void computeHMAC_SHA1_3(
struct datablock_kernel_arg *datablocks,
uint32_t count, uint16_t *batch_ids, uint16_t *item_ids,
uint8_t *checkbits_d,
struct hmac_sa_entry *hmac_key_array)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < count && count != 0) {
const uint16_t batch_idx = batch_ids[idx];
const uint16_t item_idx = item_ids[idx];
assert(item_idx < 64);
const struct datablock_kernel_arg *db_enc_payloads = &datablocks[dbid_enc_payloads_d];
const struct datablock_kernel_arg *db_flow_ids = &datablocks[dbid_flow_ids_d];
const uint8_t *enc_payload_base = (uint8_t *) db_enc_payloads->buffer_bases_in[batch_idx];
const uintptr_t offset = (uintptr_t) db_enc_payloads->item_offsets_in[batch_idx][item_idx];
const uintptr_t length = (uintptr_t) db_enc_payloads->item_sizes_in[batch_idx][item_idx];
if (enc_payload_base != NULL && offset != 0 && length != 0) {
const uint64_t flow_id = ((uint64_t *) db_flow_ids->buffer_bases_in[batch_idx])[item_idx];
if (flow_id != 65536 && flow_id < 1024) {
//assert(flow_id < 1024);
const char *hmac_key = (char *) hmac_key_array[flow_id].hmac_key;
HMAC_SHA1((uint32_t *) (enc_payload_base + offset),
(uint32_t *) (enc_payload_base + offset + length),
length, hmac_key);
}
}
}
__syncthreads();
if (threadIdx.x == 0 && checkbits_d != NULL)
checkbits_d[blockIdx.x] = 1;
}
}
void *nba::ipsec_hsha1_encryption_get_cuda_kernel() {
return reinterpret_cast<void *> (computeHMAC_SHA1_3);
}
// vim: ts=8 sts=4 sw=4 et
| 1bffe04a17b3ada9f62cf0d35a5dc44c3aa619df.cu | #include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <nba/engines/cuda/utils.hh>
#include "IPsecAuthHMACSHA1_kernel.hh"
/* Compatibility definitions. */
#include <nba/engines/cuda/compat.hh>
/* The index is given by the order in get_used_datablocks(). */
#define dbid_enc_payloads_d (0)
#define dbid_flow_ids_d (1)
#define SHA1_THREADS_PER_BLK 32
extern "C" {
//__global__ uint32_t d_pad_buffer[16 * 2 * MAX_CHUNK_SIZE * MAX_GROUP_SIZE];
__device__ uint32_t swap(uint32_t v) {
return ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8)
| ((v & 0x00ff0000U) >> 8) | ((v & 0xff000000U) >> 24);
}
typedef struct hash_digest {
uint32_t h1;
uint32_t h2;
uint32_t h3;
uint32_t h4;
uint32_t h5;
} hash_digest_t;
#define HMAC
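// getBlock loads one 64-byte SHA-1 block from buf at 'offset' as big-endian words and
// applies the SHA-1 padding (0x80 byte plus 64-bit bit length) when the message ends in
// or before this block. With HMAC defined, the encoded length adds 64 bytes for the
// ipad block hashed first.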
__inline__ __device__ void getBlock(char* buf, int offset, int len, uint32_t* dest)
{
uint32_t *tmp;
unsigned int tempbuf[16];
tmp = (uint32_t*) (buf + offset);
//printf("%d %d\n", offset, len);
if (offset + 64 <= len) {
//printf("--0--\n");
#pragma unroll 16
for (int i = 0; i < 16; i++) {
dest[i] = swap(tmp[i]);
}
} else if (len > offset && (len - offset) < 56) { //case 1 enough space in last block for padding
//printf("--1--\n");
int i;
for (i = 0; i < (len - offset) / 4; i++) {
//printf("%d %d\n",offset,i);
//printf("%p %p\n", buf, dest);
//tempbuf[i] = buf[i];
tempbuf[i] = swap(tmp[i]);
}
//printf("len%%4 %d\n",len%4);
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
for (; i < 14; i++) {
tempbuf[i] = 0;
}
#pragma unroll 14
for (i = 0; i < 14; i++) {
dest[i] = tempbuf[i];
}
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (len > offset && (len - offset) >= 56) { //case 2 not enough space in last block (containing message) for padding
//printf("--2--\n");
int i;
for (i = 0; i < (len - offset) / 4; i++) {
tempbuf[i] = swap(tmp[i]);
}
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
for (; i < 16; i++) {
tempbuf[i] = 0x00000000;
}
#pragma unroll 16
for (i = 0; i < 16; i++) {
dest[i] = tempbuf[i];
}
} else if (offset == len) { //message end is aligned in 64 bytes
//printf("--3--\n");
dest[0] = swap(0x00000080);
#pragma unroll 13
for (int i = 1; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (offset > len) { //the last block in case 2
//printf("--4--\n");
#pragma unroll 14
for (int i = 0; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else {
printf("Not supposed to happen\n");
}
}
__device__ void computeSHA1Block(char* in, uint32_t* w, int offset, int len,
hash_digest_t &h) {
uint32_t a = h.h1;
uint32_t b = h.h2;
uint32_t c = h.h3;
uint32_t d = h.h4;
uint32_t e = h.h5;
uint32_t f;
uint32_t k;
uint32_t temp;
getBlock(in, offset, len, w);
//for (int i = 0; i < 16 ; i++) {
// printf("%0X\n", w[i]);
//}
//printf("\n");
k = 0x5A827999;
//0 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//1 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//2 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//3 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
//4 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//5 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//6 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//7 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
//8 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//9 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//10 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//11 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
//12 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//13 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//14 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//15 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//16 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//17 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//18 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//19 of 0-20
f = (b & c) | ((~b) & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
k = 0x6ED9EBA1;
//20 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//21 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//22 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//23 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
//24 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//25 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//26 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//27 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
//28 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//29 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//30 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//31 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//32 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//33 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//34 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//35 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
//36 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//37 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//38 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//39 of 20-40
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
k = 0x8F1BBCDC;
//40 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//41 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//42 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//43 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
//44 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//45 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//46 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//47 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//48 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[0] = w[13] ^ w[8] ^ w[2] ^ w[0];
w[0] = w[0] << 1 | w[0] >> 31;
//49 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[1] = w[14] ^ w[9] ^ w[3] ^ w[1];
w[1] = w[1] << 1 | w[1] >> 31;
//50 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[2] = w[15] ^ w[10] ^ w[4] ^ w[2];
w[2] = w[2] << 1 | w[2] >> 31;
//51 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[3] = w[0] ^ w[11] ^ w[5] ^ w[3];
w[3] = w[3] << 1 | w[3] >> 31;
//52 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[4] = w[1] ^ w[12] ^ w[6] ^ w[4];
w[4] = w[4] << 1 | w[4] >> 31;
//53 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[5] = w[2] ^ w[13] ^ w[7] ^ w[5];
w[5] = w[5] << 1 | w[5] >> 31;
//54 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[6] = w[3] ^ w[14] ^ w[8] ^ w[6];
w[6] = w[6] << 1 | w[6] >> 31;
//55 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[7] = w[4] ^ w[15] ^ w[9] ^ w[7];
w[7] = w[7] << 1 | w[7] >> 31;
//56 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[8] = w[5] ^ w[0] ^ w[10] ^ w[8];
w[8] = w[8] << 1 | w[8] >> 31;
//57 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[9] = w[6] ^ w[1] ^ w[11] ^ w[9];
w[9] = w[9] << 1 | w[9] >> 31;
//58 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[10] = w[7] ^ w[2] ^ w[12] ^ w[10];
w[10] = w[10] << 1 | w[10] >> 31;
//59 of 40-60
f = (b & c) | (b & d) | (c & d);
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[11] = w[8] ^ w[3] ^ w[13] ^ w[11];
w[11] = w[11] << 1 | w[11] >> 31;
k = 0xCA62C1D6;
//60 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[12] = w[9] ^ w[4] ^ w[14] ^ w[12];
w[12] = w[12] << 1 | w[12] >> 31;
//61 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[13] = w[10] ^ w[5] ^ w[15] ^ w[13];
w[13] = w[13] << 1 | w[13] >> 31;
//62 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[14] = w[11] ^ w[6] ^ w[0] ^ w[14];
w[14] = w[14] << 1 | w[14] >> 31;
//63 of 60-64
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
w[15] = w[12] ^ w[7] ^ w[1] ^ w[15];
w[15] = w[15] << 1 | w[15] >> 31;
//64 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[0];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//65 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[1];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//66 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[2];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//67 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[3];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//68 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[4];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//69 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[5];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//70 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[6];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//71 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[7];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//72 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[8];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//73 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[9];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//74 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[10];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//75 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[11];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//76 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[12];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//77 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[13];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//78 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[14];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
//79 of 64-80
f = b ^ c ^ d;
temp = ((a << 5) | (a >> 27)) + f + e + k + w[15];
e = d;
d = c;
c = (b << 30) | (b >> 2);
b = a;
a = temp;
h.h1 += a;
h.h2 += b;
h.h3 += c;
h.h4 += d;
h.h5 += e;
}
/*
__global__ void computeSHA1(char* buf, int *offsets, int *len, char* output, int N)
{
//__shared__ uint32_t w_shared[16*SHA1_THREADS_PER_BLK];
uint32_t w_register[16];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
uint32_t *w = w_register;//w_shared + 16*threadIdx.x;
hash_digest_t h;
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
int num_iter = (len[index]+63+9)/64;
printf("num_iter %d\n", num_iter);
for(int i = 0; i < num_iter; i++)
computeSHA1Block(buf + offsets[index], w, i*64 , len[index], h);
h.h1 = swap(h.h1);
h.h2 = swap(h.h2);
h.h3 = swap(h.h3);
h.h4 = swap(h.h4);
h.h5 = swap(h.h5);
uint32_t * out = (uint32_t*)(output + index*20);
*(out++) = h.h1;
*(out++) = h.h2;
*(out++) = h.h3;
*(out++) = h.h4;
*(out++) = h.h5;
}
}*/
/*
Somehow *pad = *pad++ ^ *key++
gets optimized in a way that does not work correctly on the GPU, so the explicit indexed form below is used instead.
*/
__device__ void xorpads(uint32_t *pad, const uint32_t* key) {
#pragma unroll 16
for (int i = 0; i < 16; i++)
*(pad + i) = *(pad + i) ^ *(key + i);
}
uint32_t opad[16] =
{ 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, };
uint32_t ipad[16] =
{ 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, };
// in: start pointer of the data to be authenticated by hsha1.
// out: start pointer of the data where hsha1 signature will be recorded.
// length: length of the data to be authenticated by hsha1.
// key: hmac key.
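// Computes standard HMAC: SHA1((key xor opad) || SHA1((key xor ipad) || message)).
// The inner digest is written to 'out' first and then overwritten by the final digest.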
__device__ void HMAC_SHA1(uint32_t *in, uint32_t *out, uint32_t length,
const char *key) {
uint32_t w_register[16];
uint32_t *w = w_register; //w_shared + 16*threadIdx.x;
hash_digest_t h;
for (int i = 0; i < 16; i++)
w[i] = 0x36363636;
xorpads(w, (uint32_t*) (key));
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
//SHA1 compute on ipad
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA1 compute on mesage
int num_iter = (length + 63 + 9) / 64;
for (int i = 0; i < num_iter; i++)
computeSHA1Block((char*) in, w, i * 64, length, h);
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
for (int i = 0; i < 16; i++)
w[i] = 0x5c5c5c5c;
xorpads(w, (uint32_t*) (key));
//SHA 1 compute on opads
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA 1 compute on (hash of ipad|m)
computeSHA1Block((char*) out, w, 0, 20, h);
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
}
#if 0
__global__ void computeHMAC_SHA1(char* buf, char* keys, uint32_t *offsets,
uint32_t *lengths, uint32_t *outputs, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
printf("index%d threadid%d\n", index, threadIdx.x);
uint32_t offset = offsets[index];
uint32_t length = lengths[index];
uint32_t *out = (uint32_t*) (buf + outputs[index]);
HMAC_SHA1((uint32_t*) (buf + offset), out, length, keys + 64 * index);
}
}
__global__ void computeHMAC_SHA1_2(char* buf, char* keys, uint32_t *offsets,
uint16_t *lengths, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
printf("index%d threadid%d\n", index, threadIdx.x);
uint32_t offset = offsets[index];
uint32_t length = lengths[index];
uint32_t *out = (uint32_t*) (buf + offset + length);
HMAC_SHA1((uint32_t*) (buf + offset), out, length, keys + 64 * index);
}
}
#endif
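/* Batched HMAC-SHA1 kernel: each thread looks up the encrypted payload and flow id
 * of its packet from the datablock arguments, fetches the per-flow HMAC key, and
 * appends the 20-byte digest immediately after the payload. Items with a null
 * payload base, zero offset/length, or an out-of-range flow id are skipped.
 * Thread 0 of each block sets its checkbit once the whole block has finished. */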
__global__ void computeHMAC_SHA1_3(
struct datablock_kernel_arg *datablocks,
uint32_t count, uint16_t *batch_ids, uint16_t *item_ids,
uint8_t *checkbits_d,
struct hmac_sa_entry *hmac_key_array)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < count && count != 0) {
const uint16_t batch_idx = batch_ids[idx];
const uint16_t item_idx = item_ids[idx];
assert(item_idx < 64);
const struct datablock_kernel_arg *db_enc_payloads = &datablocks[dbid_enc_payloads_d];
const struct datablock_kernel_arg *db_flow_ids = &datablocks[dbid_flow_ids_d];
const uint8_t *enc_payload_base = (uint8_t *) db_enc_payloads->buffer_bases_in[batch_idx];
const uintptr_t offset = (uintptr_t) db_enc_payloads->item_offsets_in[batch_idx][item_idx];
const uintptr_t length = (uintptr_t) db_enc_payloads->item_sizes_in[batch_idx][item_idx];
if (enc_payload_base != NULL && offset != 0 && length != 0) {
const uint64_t flow_id = ((uint64_t *) db_flow_ids->buffer_bases_in[batch_idx])[item_idx];
if (flow_id != 65536 && flow_id < 1024) {
//assert(flow_id < 1024);
const char *hmac_key = (char *) hmac_key_array[flow_id].hmac_key;
HMAC_SHA1((uint32_t *) (enc_payload_base + offset),
(uint32_t *) (enc_payload_base + offset + length),
length, hmac_key);
}
}
}
__syncthreads();
if (threadIdx.x == 0 && checkbits_d != NULL)
checkbits_d[blockIdx.x] = 1;
}
}
void *nba::ipsec_hsha1_encryption_get_cuda_kernel() {
return reinterpret_cast<void *> (computeHMAC_SHA1_3);
}
// vim: ts=8 sts=4 sw=4 et
|
2cccd2bf9cf66ec30bce0dab2d23ce032ffedbea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
__device__
float sigmoid (float x) {
return (1.f / (1.f + expf(-x)));
}
__global__
void parallelPitched2DAccess (float* devPtr, size_t pitch, int width, int height)
{
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
if (r < height && c < width) {
float* row = (float*)((char*)devPtr + r * pitch);
row[c] = sigmoid(row[c]);
}
}
__global__
void parallelSimple2DAccess (float* elem, int width, int height)
{
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
if (r < height && c < width) {
elem[r * width + c] = sigmoid(elem[r * width + c]);
}
}
__global__
void parallelPitched3DAccess (hipPitchedPtr devPitchedPtr, int width, int height, int depth)
{
int z = blockIdx.z * blockDim.z + threadIdx.z;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (z < depth && y < height && x < width) {
char* devPtr = (char*)devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * height;
char* slice = devPtr + z * slicePitch;
float* row = (float*)(slice + y * pitch);
row[x] = sigmoid(row[x]);
}
}
__global__
void parallelSimple3DAccess (float* elem, int width, int height, int depth)
{
int z = blockIdx.z * blockDim.z + threadIdx.z;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (z < depth && y < height && x < width) {
float element = elem[z * height * width + y * width + x];
elem[z * height * width + y * width + x] = sigmoid(element);
}
}
// Host code
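// Times 'repeat' launches of the sigmoid kernel over a pitched 2D allocation and over a
// plain linear allocation of the same logical size, then reports both averages.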
void malloc2D (int repeat, int width, int height) {
printf("Dimension: (%d %d)\n", width, height);
dim3 grid ((width + 15)/16, (height + 15)/16);
dim3 block (16, 16);
float* devPtr;
size_t pitch;
hipMallocPitch((void**)&devPtr, &pitch, width * sizeof(float), height);
hipLaunchKernelGGL(( parallelPitched2DAccess), dim3(grid), dim3(block), 0, 0, devPtr, pitch, width, height);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( parallelPitched2DAccess), dim3(grid), dim3(block), 0, 0, devPtr, pitch, width, height);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
hipFree(devPtr);
hipMalloc((void**)&devPtr, width * height * sizeof(float));
hipLaunchKernelGGL(( parallelSimple2DAccess), dim3(grid), dim3(block), 0, 0, devPtr, width, height);
hipDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( parallelSimple2DAccess), dim3(grid), dim3(block), 0, 0, devPtr, width, height);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time (pitched vs simple): %f %f (us)\n",
(time * 1e-3f) / repeat, (time2 * 1e-3f) / repeat);
hipFree(devPtr);
}
// Host code
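// Same pitched-vs-linear timing comparison for a 3D volume.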
void malloc3D (int repeat, int width, int height, int depth) {
printf("Dimension: (%d %d %d)\n", width, height, depth);
dim3 grid ((width + 15)/16, (height + 7)/8, (depth + 3)/4);
dim3 block (16, 8, 4);
hipExtent extent = make_hipExtent(width * sizeof(float), height, depth);
hipPitchedPtr devPitchedPtr;
hipMalloc3D(&devPitchedPtr, extent);
hipLaunchKernelGGL(( parallelPitched3DAccess), dim3(grid), dim3(block), 0, 0, devPitchedPtr, width, height, depth);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( parallelPitched3DAccess), dim3(grid), dim3(block), 0, 0, devPitchedPtr, width, height, depth);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
hipFree(devPitchedPtr.ptr);
float* devPtr;
hipMalloc(&devPtr, width * height * depth * sizeof(float));
hipLaunchKernelGGL(( parallelSimple3DAccess), dim3(grid), dim3(block), 0, 0, devPtr, width, height, depth);
hipDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( parallelSimple3DAccess), dim3(grid), dim3(block), 0, 0, devPtr, width, height, depth);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time (pitched vs simple): %f %f (us)\n",
(time * 1e-3f) / repeat, (time2 * 1e-3f) / repeat);
hipFree(devPtr);
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
// width, height and depth
const int w[] = {227, 256, 720, 768, 854, 1280, 1440, 1920, 2048, 3840, 4096};
const int h[] = {227, 256, 480, 576, 480, 720, 1080, 1080, 1080, 2160, 2160};
const int d[] = {1, 3};
for (int i = 0; i < 11; i++)
malloc2D(repeat, w[i], h[i]);
for (int i = 0; i < 11; i++)
for (int j = 0; j < 2; j++)
malloc3D(repeat, w[i], h[i], d[j]);
return 0;
}
| 2cccd2bf9cf66ec30bce0dab2d23ce032ffedbea.cu | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
__device__
float sigmoid (float x) {
return (1.f / (1.f + expf(-x)));
}
__global__
void parallelPitched2DAccess (float* devPtr, size_t pitch, int width, int height)
{
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
if (r < height && c < width) {
float* row = (float*)((char*)devPtr + r * pitch);
row[c] = sigmoid(row[c]);
}
}
__global__
void parallelSimple2DAccess (float* elem, int width, int height)
{
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
if (r < height && c < width) {
elem[r * width + c] = sigmoid(elem[r * width + c]);
}
}
__global__
void parallelPitched3DAccess (cudaPitchedPtr devPitchedPtr, int width, int height, int depth)
{
int z = blockIdx.z * blockDim.z + threadIdx.z;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (z < depth && y < height && x < width) {
char* devPtr = (char*)devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * height;
char* slice = devPtr + z * slicePitch;
float* row = (float*)(slice + y * pitch);
row[x] = sigmoid(row[x]);
}
}
__global__
void parallelSimple3DAccess (float* elem, int width, int height, int depth)
{
int z = blockIdx.z * blockDim.z + threadIdx.z;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (z < depth && y < height && x < width) {
float element = elem[z * height * width + y * width + x];
elem[z * height * width + y * width + x] = sigmoid(element);
}
}
// Host code
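// Times 'repeat' launches of the sigmoid kernel over a pitched 2D allocation and over a
// plain linear allocation of the same logical size, then reports both averages.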
void malloc2D (int repeat, int width, int height) {
printf("Dimension: (%d %d)\n", width, height);
dim3 grid ((width + 15)/16, (height + 15)/16);
dim3 block (16, 16);
float* devPtr;
size_t pitch;
cudaMallocPitch((void**)&devPtr, &pitch, width * sizeof(float), height);
parallelPitched2DAccess<<<grid, block>>>(devPtr, pitch, width, height);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
parallelPitched2DAccess<<<grid, block>>>(devPtr, pitch, width, height);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
cudaFree(devPtr);
cudaMalloc((void**)&devPtr, width * height * sizeof(float));
parallelSimple2DAccess<<<grid, block>>>(devPtr, width, height);
cudaDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
parallelSimple2DAccess<<<grid, block>>>(devPtr, width, height);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time (pitched vs simple): %f %f (us)\n",
(time * 1e-3f) / repeat, (time2 * 1e-3f) / repeat);
cudaFree(devPtr);
}
// Host code
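// Same pitched-vs-linear timing comparison for a 3D volume.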
void malloc3D (int repeat, int width, int height, int depth) {
printf("Dimension: (%d %d %d)\n", width, height, depth);
dim3 grid ((width + 15)/16, (height + 7)/8, (depth + 3)/4);
dim3 block (16, 8, 4);
cudaExtent extent = make_cudaExtent(width * sizeof(float), height, depth);
cudaPitchedPtr devPitchedPtr;
cudaMalloc3D(&devPitchedPtr, extent);
parallelPitched3DAccess<<<grid, block>>>(devPitchedPtr, width, height, depth);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
parallelPitched3DAccess<<<grid, block>>>(devPitchedPtr, width, height, depth);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
cudaFree(devPitchedPtr.ptr);
float* devPtr;
cudaMalloc(&devPtr, width * height * depth * sizeof(float));
parallelSimple3DAccess<<<grid, block>>>(devPtr, width, height, depth);
cudaDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
parallelSimple3DAccess<<<grid, block>>>(devPtr, width, height, depth);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time (pitched vs simple): %f %f (us)\n",
(time * 1e-3f) / repeat, (time2 * 1e-3f) / repeat);
cudaFree(devPtr);
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
// width, height and depth
const int w[] = {227, 256, 720, 768, 854, 1280, 1440, 1920, 2048, 3840, 4096};
const int h[] = {227, 256, 480, 576, 480, 720, 1080, 1080, 1080, 2160, 2160};
const int d[] = {1, 3};
for (int i = 0; i < 11; i++)
malloc2D(repeat, w[i], h[i]);
for (int i = 0; i < 11; i++)
for (int j = 0; j < 2; j++)
malloc3D(repeat, w[i], h[i], d[j]);
return 0;
}
|
3ec353de903963d45d6f6f140775ac2c1adf163e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
// The following code demonstrates the use of shared memory and thread synchronization.
// Its effect is to compute running averages.
__global__ void gpu_shared_memory(float* d_a) {
int i, index = threadIdx.x; // thread index, starting from 0
float average, sum = 0.0f;
// declare shared memory
__shared__ float sh_arr[10];
sh_arr[index] = d_a[index]; // copy the data from global memory into shared memory
// the following instruction ensures that all writes to memory have completed before execution continues
__syncthreads();
for (i = 0; i <= index; i++) // parallel loop
{
sum += sh_arr[i];
}
average = sum / (index + 1.0f);
d_a[index] = average;
sh_arr[index] = average; // this write is only here to demonstrate the lifetime of shared memory (see the translator's note)
}
int main(void)
{
float h_a[10], * d_a;
// assign initial values
for (int i = 0; i < 10; i++)
{
h_a[i] = i;
}
hipMalloc((void **)&d_a, 10 * sizeof(float));
hipMemcpy((void *)d_a, (void *)h_a, 10 * sizeof(float), hipMemcpyHostToDevice);
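// one block of 10 threads: thread k computes the running average of elements 0..k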
gpu_shared_memory << <1, 10 >> > (d_a);
hipMemcpy((void*)h_a, (void*)d_a, 10 * sizeof(float), hipMemcpyDeviceToHost);
printf("Use of shared Memory on GPU\n");
for (int i = 0; i < 10; i++)
{
printf("The running average after %d element is %f \n", i, h_a[i]);
}
return 0;
} | 3ec353de903963d45d6f6f140775ac2c1adf163e.cu | #include "cuda_runtime.h"
#include <stdio.h>
#include <iostream>
#include <cuda.h>
// The following code demonstrates the use of shared memory and thread synchronization.
// Its effect is to compute running averages.
__global__ void gpu_shared_memory(float* d_a) {
int i, index = threadIdx.x; // thread index, starting from 0
float average, sum = 0.0f;
// declare shared memory
__shared__ float sh_arr[10];
sh_arr[index] = d_a[index]; // copy the data from global memory into shared memory
// the following instruction ensures that all writes to memory have completed before execution continues
__syncthreads();
for (i = 0; i <= index; i++) // parallel loop
{
sum += sh_arr[i];
}
average = sum / (index + 1.0f);
d_a[index] = average;
sh_arr[index] = average; // this write is only here to demonstrate the lifetime of shared memory (see the translator's note)
}
int main(void)
{
float h_a[10], * d_a;
// assign initial values
for (int i = 0; i < 10; i++)
{
h_a[i] = i;
}
cudaMalloc((void **)&d_a, 10 * sizeof(float));
cudaMemcpy((void *)d_a, (void *)h_a, 10 * sizeof(float), cudaMemcpyHostToDevice);
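// one block of 10 threads: thread k computes the running average of elements 0..k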
gpu_shared_memory << <1, 10 >> > (d_a);
cudaMemcpy((void*)h_a, (void*)d_a, 10 * sizeof(float), cudaMemcpyDeviceToHost);
printf("Use of shared Memory on GPU\n");
for (int i = 0; i < 10; i++)
{
printf("The running average after %d element is %f \n", i, h_a[i]);
}
return 0;
} |
aa4e220791587558896a07b9db474658bdc55362.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "kernel1.h"
//extern __shared__ float sdata[];
////////////////////////////////////////////////////////////////////////////////
//! Weighted Jacobi Iteration
//! @param g_dataA input data in global memory
//! @param g_dataB output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void k1( float* g_dataA, float* g_dataB, int floatpitch, int width)
{
extern __shared__ float s_data[];
// TODO, implement this kernel below
// global thread
unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
// increment to start off not on the row 0 or column 0
i ++;
j ++;
unsigned int sharedMemorySize = blockDim.x + 2; // this is the shared memory size which has 2 more columns than the block size
// increment to start off not on the row 0 or column 0 in shared memory
int shared_i_index = threadIdx.y + 1;
int shared_j_index = threadIdx.x + 1;
if(i < width - 1|| j < width - 1) // to make sure it doesn't get the last row or last column since we don't want to change that
{
// we get the element into shared memory along with the one above and below it
s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index] = g_dataA[(i-1) * floatpitch + j];
s_data[shared_i_index * sharedMemorySize + shared_j_index] = g_dataA[i * floatpitch + j];
s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index] = g_dataA[(i+1) * floatpitch + j];
if(shared_j_index == 1) // if we are on the first index of the block, we want to pass in the column before it into shared memory
{
s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index-1] = g_dataA[(i-1) * floatpitch + j-1];
s_data[shared_i_index * sharedMemorySize + shared_j_index-1] = g_dataA[i * floatpitch + j-1];
s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index-1] = g_dataA[(i+1) * floatpitch + j-1];
}
if(shared_j_index == sharedMemorySize -2) // if we are the last index of the block then we also want to pass the next column into shared memory
{
s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index+1] = g_dataA[(i-1) * floatpitch + j+1];
s_data[shared_i_index * sharedMemorySize + shared_j_index+1] = g_dataA[i * floatpitch + j+1];
s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index+1] = g_dataA[(i+1) * floatpitch + j+1];
}
}
__syncthreads();
/*
if(blockIdx.x == 2 && blockIdx.y == 13 && threadIdx.x == 0 && threadIdx.y == 0)
{
for(int u = 0; u < 3; u++)
{
for(int y = 0; y < sharedMemorySize; y++)
{
printf("%04.2f ", s_data[u*sharedMemorySize + y]);
}
printf("\n");
}
printf("\n");
}
*/
if( i >= width - 1|| j >= width - 1 || i < 1 || j < 1 ) return; // return the ones that are out of range
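// 9-point weighted Jacobi update: weight 0.2 on the cell itself, 0.1 on each of its 8 neighbours, damped by 0.95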
g_dataB[i * floatpitch + j] = (
0.2f * s_data[shared_i_index * sharedMemorySize + shared_j_index] + //itself
0.1f * s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index ] + //N
0.1f * s_data[(shared_i_index-1) * sharedMemorySize + (shared_j_index+1)] + //NE
0.1f * s_data[ shared_i_index * sharedMemorySize + (shared_j_index+1)] + //E
0.1f * s_data[(shared_i_index+1) * sharedMemorySize + (shared_j_index+1)] + //SE
0.1f * s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index ] + //S
0.1f * s_data[(shared_i_index+1) * sharedMemorySize + (shared_j_index-1)] + //SW
0.1f * s_data[ shared_i_index * sharedMemorySize + (shared_j_index-1)] + //W
0.1f * s_data[(shared_i_index-1) * sharedMemorySize + (shared_j_index-1)] //NW
) * 0.95f;
}
| aa4e220791587558896a07b9db474658bdc55362.cu | #include <stdio.h>
#include "kernel1.h"
//extern __shared__ float sdata[];
////////////////////////////////////////////////////////////////////////////////
//! Weighted Jacobi Iteration
//! @param g_dataA input data in global memory
//! @param g_dataB output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void k1( float* g_dataA, float* g_dataB, int floatpitch, int width)
{
extern __shared__ float s_data[];
// TODO, implement this kernel below
// global thread
unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
// increment to start off not on the row 0 or column 0
i ++;
j ++;
unsigned int sharedMemorySize = blockDim.x + 2; // this is the shared memory size which has 2 more columns than the block size
// increment to start off not on the row 0 or column 0 in shared memory
int shared_i_index = threadIdx.y + 1;
int shared_j_index = threadIdx.x + 1;
if(i < width - 1|| j < width - 1) // to make sure it doesn't get the last row or last column since we don't want to change that
{
// we get the element into shared memory along with the one above and below it
s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index] = g_dataA[(i-1) * floatpitch + j];
s_data[shared_i_index * sharedMemorySize + shared_j_index] = g_dataA[i * floatpitch + j];
s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index] = g_dataA[(i+1) * floatpitch + j];
if(shared_j_index == 1) // if we are on the first index of the block, we want to pass in the column before it into shared memory
{
s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index-1] = g_dataA[(i-1) * floatpitch + j-1];
s_data[shared_i_index * sharedMemorySize + shared_j_index-1] = g_dataA[i * floatpitch + j-1];
s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index-1] = g_dataA[(i+1) * floatpitch + j-1];
}
if(shared_j_index == sharedMemorySize -2) // if we are the last index of the block then we also want to pass the next column into shared memory
{
s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index+1] = g_dataA[(i-1) * floatpitch + j+1];
s_data[shared_i_index * sharedMemorySize + shared_j_index+1] = g_dataA[i * floatpitch + j+1];
s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index+1] = g_dataA[(i+1) * floatpitch + j+1];
}
}
__syncthreads();
/*
if(blockIdx.x == 2 && blockIdx.y == 13 && threadIdx.x == 0 && threadIdx.y == 0)
{
for(int u = 0; u < 3; u++)
{
for(int y = 0; y < sharedMemorySize; y++)
{
printf("%04.2f ", s_data[u*sharedMemorySize + y]);
}
printf("\n");
}
printf("\n");
}
*/
if( i >= width - 1|| j >= width - 1 || i < 1 || j < 1 ) return; // return the ones that are out of range
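// 9-point weighted Jacobi update: weight 0.2 on the cell itself, 0.1 on each of its 8 neighbours, damped by 0.95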
g_dataB[i * floatpitch + j] = (
0.2f * s_data[shared_i_index * sharedMemorySize + shared_j_index] + //itself
0.1f * s_data[(shared_i_index-1) * sharedMemorySize + shared_j_index ] + //N
0.1f * s_data[(shared_i_index-1) * sharedMemorySize + (shared_j_index+1)] + //NE
0.1f * s_data[ shared_i_index * sharedMemorySize + (shared_j_index+1)] + //E
0.1f * s_data[(shared_i_index+1) * sharedMemorySize + (shared_j_index+1)] + //SE
0.1f * s_data[(shared_i_index+1) * sharedMemorySize + shared_j_index ] + //S
0.1f * s_data[(shared_i_index+1) * sharedMemorySize + (shared_j_index-1)] + //SW
0.1f * s_data[ shared_i_index * sharedMemorySize + (shared_j_index-1)] + //W
0.1f * s_data[(shared_i_index-1) * sharedMemorySize + (shared_j_index-1)] //NW
) * 0.95f;
}
|
f5c275db66912d7eab95d533e4cf7c56a4038a24.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Jacob Austin on 5/17/18.
//
#define GLM_FORCE_PURE
#include "mass.h"
Mass::Mass() {
m = 1.0;
dt = 0.0001;
damping = 1.0;
T = 0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
} // constructor TODO fix timing
void Mass::operator=(CUDA_MASS & mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = mass.valid;
ref_count = this -> ref_count;
arrayptr = this -> arrayptr;
#ifdef CONSTRAINTS
constraints = this -> constraints;
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
Mass::Mass(const Vec & position, double mass, bool fixed, double dt) {
m = mass;
pos = position;
this -> dt = dt;
T = 0;
damping = 1.0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
}
CUDA_MASS::CUDA_MASS(Mass &mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = true;
#ifdef CONSTRAINTS
constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints);
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
#ifdef CONSTRAINTS
void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient
if (type == 0) {
this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num));
this -> constraints.num_constraint_planes++;
this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data());
} else if (type == 1) {
this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num));
this -> constraints.num_contact_planes++;
this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data());
} else if (type == 2) {
this -> constraints.ball.push_back(CudaBall(vec, num));
this -> constraints.num_balls++;
this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data());
} else if (type == 3) {
this -> constraints.direction.push_back(CudaDirection(vec, num));
this -> constraints.num_directions++;
this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data());
}
}
void Mass::clearConstraints(CONSTRAINT_TYPE type) {
if (type == 0) {
this -> constraints.constraint_plane.clear();
this -> constraints.constraint_plane.shrink_to_fit();
this -> constraints.num_constraint_planes = 0;
} else if (type == 1) {
this -> constraints.contact_plane.clear();
this -> constraints.contact_plane.shrink_to_fit();
this -> constraints.num_contact_planes = 0;
} else if (type == 2) {
this -> constraints.ball.clear();
this -> constraints.ball.shrink_to_fit();
this -> constraints.num_balls = 0;
} else if (type == 3) {
this -> constraints.direction.clear();
this -> constraints.direction.shrink_to_fit();
this -> constraints.num_directions = 0;
}
}
void Mass::clearConstraints() {
clearConstraints(CONSTRAINT_PLANE);
clearConstraints(CONTACT_PLANE);
clearConstraints(DIRECTION);
clearConstraints(BALL);
}
void Mass::fix() {
this -> constraints.fixed = true;
}
void Mass::unfix() {
this -> constraints.fixed = false;
}
void Mass::setDrag(double C) {
this -> constraints.drag_coefficient = C;
}
#endif
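// Releases one reference; when the count reaches zero the device-side CUDA_MASS buffer (if any) is freed and the host object deletes itself.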
void Mass::decrementRefCount() {
if (--ref_count == 0) {
if (arrayptr) {
hipFree(arrayptr);
}
delete this;
}
}
| f5c275db66912d7eab95d533e4cf7c56a4038a24.cu | //
// Created by Jacob Austin on 5/17/18.
//
#define GLM_FORCE_PURE
#include "mass.h"
Mass::Mass() {
m = 1.0;
dt = 0.0001;
damping = 1.0;
T = 0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
} // constructor TODO fix timing
void Mass::operator=(CUDA_MASS & mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = mass.valid;
ref_count = this -> ref_count;
arrayptr = this -> arrayptr;
#ifdef CONSTRAINTS
constraints = this -> constraints;
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
Mass::Mass(const Vec & position, double mass, bool fixed, double dt) {
m = mass;
pos = position;
this -> dt = dt;
T = 0;
damping = 1.0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
}
CUDA_MASS::CUDA_MASS(Mass &mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = true;
#ifdef CONSTRAINTS
constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints);
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
#ifdef CONSTRAINTS
void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient
if (type == 0) {
this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num));
this -> constraints.num_constraint_planes++;
this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data());
} else if (type == 1) {
this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num));
this -> constraints.num_contact_planes++;
this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data());
} else if (type == 2) {
this -> constraints.ball.push_back(CudaBall(vec, num));
this -> constraints.num_balls++;
this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data());
} else if (type == 3) {
this -> constraints.direction.push_back(CudaDirection(vec, num));
this -> constraints.num_directions++;
this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data());
}
}
void Mass::clearConstraints(CONSTRAINT_TYPE type) {
if (type == 0) {
this -> constraints.constraint_plane.clear();
this -> constraints.constraint_plane.shrink_to_fit();
this -> constraints.num_constraint_planes = 0;
} else if (type == 1) {
this -> constraints.contact_plane.clear();
this -> constraints.contact_plane.shrink_to_fit();
this -> constraints.num_contact_planes = 0;
} else if (type == 2) {
this -> constraints.ball.clear();
this -> constraints.ball.shrink_to_fit();
this -> constraints.num_balls = 0;
} else if (type == 3) {
this -> constraints.direction.clear();
this -> constraints.direction.shrink_to_fit();
this -> constraints.num_directions = 0;
}
}
void Mass::clearConstraints() {
clearConstraints(CONSTRAINT_PLANE);
clearConstraints(CONTACT_PLANE);
clearConstraints(DIRECTION);
clearConstraints(BALL);
}
void Mass::fix() {
this -> constraints.fixed = true;
}
void Mass::unfix() {
this -> constraints.fixed = false;
}
void Mass::setDrag(double C) {
this -> constraints.drag_coefficient = C;
}
#endif
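// Releases one reference; when the count reaches zero the device-side CUDA_MASS buffer (if any) is freed and the host object deletes itself.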
void Mass::decrementRefCount() {
if (--ref_count == 0) {
if (arrayptr) {
cudaFree(arrayptr);
}
delete this;
}
}
|
513b40b4ea84d8c014d86f9b6a1496763f77a4ea.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
// Parenthesized so the shift is not split up by surrounding arithmetic
// (unparenthesized, N + threads_num - 1 would parse as 1 << (20 + threads_num - 1)).
#define N (1<<20)
#define BLOCK_SIZE (1<<7)
__global__
void add(int* a, int* b, int* c)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t stride = blockDim.x * gridDim.x;
for (size_t i = index; i < N; i += stride) {
c[i] = a[i] + b[i];
}
}
int main(void)
{
int *a, *b, *c;
hipMallocManaged(&a, sizeof(int) * N);
hipMallocManaged(&b, sizeof(int) * N);
hipMallocManaged(&c, sizeof(int) * N);
for (size_t i = 0; i < N; ++i) {
a[i] = -i;
b[i] = i * i;
}
size_t threads_num = BLOCK_SIZE;
// May overflow
size_t blocks_num = (N + threads_num - 1) / threads_num;
hipLaunchKernelGGL(( add), dim3(blocks_num), dim3(threads_num), 0, 0, a, b, c);
hipDeviceSynchronize();
for (size_t i = 0; i < N; ++i) {
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
hipFree(a);
hipFree(b);
hipFree(c);
return 0;
}
| 513b40b4ea84d8c014d86f9b6a1496763f77a4ea.cu | #include <cuda.h>
#include <stdio.h>
#include <math.h>
// Parenthesized so the shift is not split up by surrounding arithmetic
// (unparenthesized, N + threads_num - 1 would parse as 1 << (20 + threads_num - 1)).
#define N (1<<20)
#define BLOCK_SIZE (1<<7)
__global__
void add(int* a, int* b, int* c)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t stride = blockDim.x * gridDim.x;
for (size_t i = index; i < N; i += stride) {
c[i] = a[i] + b[i];
}
}
int main(void)
{
int *a, *b, *c;
cudaMallocManaged(&a, sizeof(int) * N);
cudaMallocManaged(&b, sizeof(int) * N);
cudaMallocManaged(&c, sizeof(int) * N);
for (size_t i = 0; i < N; ++i) {
a[i] = -i;
b[i] = i * i;
}
size_t threads_num = BLOCK_SIZE;
// May overflow
size_t blocks_num = (N + threads_num - 1) / threads_num;
add<<<blocks_num, threads_num>>>(a, b, c);
cudaDeviceSynchronize();
for (size_t i = 0; i < N; ++i) {
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
cudaFree(a);
cudaFree(b);
cudaFree(c);
return 0;
}
|
b5e5468e7f4a876f63becca6eb7032d09bf63b0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file layer_norm.cu
* \brief Implements Ba et. al, Layer Normalization (https://arxiv.org/abs/1607.06450).
*/
#include "./layer_norm-inl.h"
using namespace mshadow::cuda;
namespace mxnet {
namespace op {
template <>
void LayerNormGradComputeGeneralImpl<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const TBlob& gamma,
const TBlob& mean,
const TBlob& std,
const TBlob& normalized_data,
const TBlob& ograd_mult,
const TBlob& red_out,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const mshadow::Tensor<gpu, 1, char>& workspace,
const mxnet::TShape& red_dst_shape,
const mxnet::TShape& red_src_shape,
const mxnet::TShape& red_exclude_dst_shape,
const mxnet::TShape& red_exclude_src_shape,
const int channel_size) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<gpu> *s = ctx.get_stream<gpu>();
// Compute normalized_data = (data - mean) / std
BinaryBroadcastRTCCompute {"sub"}(attrs, ctx,
{data, mean},
{kWriteTo}, {normalized_data});
BinaryBroadcastRTCCompute {"div"}(attrs, ctx,
{normalized_data, std},
{kWriteTo}, {normalized_data});
// Calculate grad_beta
if (req[2] != kNullOp) {
BROADCAST_NDIM_SWITCH(red_exclude_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, outputs[2].reshape(red_exclude_dst_shape), req[2], workspace,
ograd.reshape(red_exclude_src_shape), "red::sum{}", NDim, "identity");
});
}
// Calculate grad_gamma, it will be sum(ograd * normalized_data, exclude_axis)
ElemwiseBinaryRTCCompute {"mul"}(attrs, ctx, {normalized_data, ograd},
{kWriteTo}, {ograd_mult});
if (req[1] != kNullOp) {
BROADCAST_NDIM_SWITCH(red_exclude_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, outputs[1].reshape(red_exclude_dst_shape), req[1], workspace,
ograd_mult.reshape(red_exclude_src_shape), "red::sum{}", NDim,
"identity");
});
}
// Calculate grad_data:
// ograd_mult = ograd * gamma / std
// grad_data = ograd_mult - mean(ograd_mult, axis)
// + normalized_data * (-mean(normalized_data * ograd_mult, axis))
if (req[0] != kNullOp) {
BinaryBroadcastRTCCompute {"mul"}(attrs, ctx,
{ograd, gamma},
{kWriteTo}, {ograd_mult});
BinaryBroadcastRTCCompute {"div"}(attrs, ctx,
{ograd_mult, std},
{kWriteTo}, {ograd_mult});
BROADCAST_NDIM_SWITCH(red_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, red_out.reshape(red_dst_shape), kWriteTo, workspace,
ograd_mult.reshape(red_src_shape), "red::sum{}", NDim, "identity");
});
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<gpu, 1, DType> red_out_tensor = red_out.FlatTo1D<gpu, DType>(s);
red_out_tensor /= scalar<DType>(channel_size);
});
BinaryBroadcastRTCCompute {"sub"}(attrs, ctx,
{ograd_mult, red_out},
{req[0]}, {outputs[0]});
ElemwiseBinaryRTCCompute {"mul"}(attrs, ctx, {ograd_mult, normalized_data},
{kWriteTo}, {ograd_mult});
BROADCAST_NDIM_SWITCH(red_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, red_out.reshape(red_dst_shape), kWriteTo, workspace,
ograd_mult.reshape(red_src_shape), "red::sum{}", NDim, "identity");
});
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<gpu, 1, DType> red_out_tensor = red_out.FlatTo1D<gpu, DType>(s);
red_out_tensor /= scalar<DType>(- channel_size);
});
BinaryBroadcastRTCCompute {"mul"}(attrs, ctx,
{normalized_data, red_out},
{kAddTo}, {outputs[0]});
}
}
template <typename DType>
__device__ __forceinline__ DType warp_shfl(DType value, int src_lane,
int width = 32, unsigned int mask = 0xffffffff) {
#if TORCH_HIP_VERSION >= 9000
return __shfl_sync(mask, value, src_lane, width);
#else
return __shfl(value, src_lane, width);
#endif
}
template <typename DType>
__device__ __forceinline__ DType warp_shfl_xor(DType value, int laneMask,
int width = 32, unsigned int mask = 0xffffffff) {
#if TORCH_HIP_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
/* A single updating step of the Welford's online algorithm to calculate the mean and variance.
* The value 'curr' will be accumulated to the (mean, sigma2, count) triplet.
*
*/
template<typename DType, typename IType>
__device__ __forceinline__ void StepWelfordOnlineSum(const DType curr,
DType& mean, //NOLINT
DType& sigma2, //NOLINT
IType& count) { //NOLINT
count += IType(1);
DType delta = curr - mean;
mean += delta / count;
sigma2 += delta * (curr - mean);
}
/* Merge the mean/variance of two partitions. It's the key step of the Chan's parallel algorithm.
* The (lhs_mean, lhs_sigma2, lhs_count) will be merged into (rhs_mean, rhs_sigma2, rhs_count)
*
* See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance for more details.
*
* TODO(sxjscience) Explore the possibility of int lhs_count and rhs_count
*/
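// Merged statistics (with delta = rhs_mean - lhs_mean): n = nA + nB, mean = (nA*meanA + nB*meanB)/n, M2 = M2A + M2B + delta^2 * nA*nB / n.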
template<typename DType, typename IType>
__device__ __inline__ void ChanMergePartition(const DType lhs_mean,
const DType lhs_sigma2,
const IType lhs_count,
DType& rhs_mean, //NOLINT
DType& rhs_sigma2, //NOLINT
IType& rhs_count) { //NOLINT
DType delta = rhs_mean - lhs_mean;
DType nA = static_cast<DType>(lhs_count);
DType nB = static_cast<DType>(rhs_count);
rhs_count = nA + nB;
if (rhs_count > DType(0)) {
nA = nA / rhs_count;
nB = nB / rhs_count;
rhs_mean = nA * lhs_mean + nB * rhs_mean;
rhs_sigma2 = rhs_sigma2 + lhs_sigma2 + delta * delta * nA * nB * rhs_count;
} else {
rhs_mean = DType(0);
rhs_sigma2 = DType(0);
}
}
/* Split the input column into multiple partitions and compute the mean/sigma of each partition.
* Each thread will keep a mean/sigma2. The mean/sigma2 can be further merged to get the mean and
* sigma2 of the column.
*/
template<typename AType, typename DType, typename IType>
__device__ __forceinline__ void BlockWelfordOnlineSum(const DType* __restrict__ col_vals,
const int nchannel,
AType& mean, //NOLINT
AType& sigma2, //NOLINT
IType& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// Each thread takes charge of 4 consecutive numbers. This should optimize the loading speed using
// vectorized types like float4.
// Also, to minimize branch divergence, we split the for-loop into two parts.
int l = 4 * tid;
for (; l + 3 < nchannel; l += 4 * nthread) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l + i]), mean, sigma2, count);
}
}
for (; l < nchannel; ++l) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l]), mean, sigma2, count);
}
}
template<>
__device__ __forceinline__
void BlockWelfordOnlineSum<float, mshadow::half::half_t, int>
(const mshadow::half::half_t* __restrict__ col_vals,
const int nchannel,
float& mean, //NOLINT
float& sigma2, //NOLINT
int& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// We cast the input half pointer to half2 to optimize the loading speed.
// Here, we need to notice that CUDA forces memory alignment, i.e.,
// ASSERT static_cast<size_t>(ptr) % sizeof(dtype) == 0.
// Thus, we need to shift the address of the half pointer to be aligned by half2.
int align_shift = (reinterpret_cast<size_t>(col_vals) % 4) != 0;
int padding = (nchannel - align_shift) % 2;
int half2_size = (nchannel - align_shift) / 2;
const __half2* half2_col_vals = reinterpret_cast<const __half2*>(col_vals + align_shift);
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (align_shift) {
StepWelfordOnlineSum(__half2float(col_vals[0].cuhalf_), mean, sigma2, count);
}
if (padding) {
StepWelfordOnlineSum(__half2float(col_vals[nchannel - 1].cuhalf_), mean, sigma2, count);
}
}
for (int l = tid; l < half2_size; l += nthread) {
float2 ele_val = __half22float2(half2_col_vals[l]);
StepWelfordOnlineSum(ele_val.x, mean, sigma2, count);
StepWelfordOnlineSum(ele_val.y, mean, sigma2, count);
}
}
/* Fused CUDA kernel for the forward pass of layer normalization.
* It computes the LayerNorm when axis=-1, i.e., contiguous reduction scenario.
* Shape of the input tensors:
* in_data = (nbatch, nchannel)
* gamma = (nchannel,)
* beta = (nchannel,)
* out_data = (nchannel,)
* mean_data = (nbatch,)
* var_data = (nbatch,)
* It's always launched with (blockDim.x, blockDim.y) = (WARP_SIZE, blockDim.y)
* Also, when blockDim.y > 1, it requires shared memory that has size:
* sizeof(AType) * blockDim.y + sizeof(int) * blockDim.y / 2
*/
template<typename AType, typename DType, typename IType>
__global__ void LayerNormFusedForwardKernelContig(const int nbatch,
const int nchannel,
const AType eps,
const DType* __restrict__ in_data,
const DType* __restrict__ gamma,
const DType* __restrict__ beta,
DType* __restrict__ out_data,
DType* __restrict__ mean_data,
DType* __restrict__ std_data) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int nthread = blockDim.x * blockDim.y;
IType count = 0;
AType mean = 0;
AType sigma2 = 0;
if (bid < nbatch) {
extern __shared__ char buf[]; // Shared memory
const DType* col_vals = in_data + bid * nchannel;
BlockWelfordOnlineSum(col_vals, nchannel, mean, sigma2, count);
// Merge the mean/sigma2 within a warp
// Use the Chan's Parallel Algorithm to merge all (mean, sigma2, counts)
// within a warp of threads.
    // After this loop, the thread with threadIdx.x == 0 holds the aggregated
    // (mean, sigma2, count) for its warp.
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
AType meanB = warp_shfl_xor(mean, mask);
AType sigma2B = warp_shfl_xor(sigma2, mask);
IType countB = warp_shfl_xor(count, mask);
ChanMergePartition(meanB, sigma2B, countB, mean, sigma2, count);
}
if (blockDim.y > 1) {
// Inter-warp reduction. Copy the upper-half of the warps to shared memory
// and merge with the lower-half warp
AType* mean_buf = reinterpret_cast<AType*>(buf);
AType* sigma2_buf =
reinterpret_cast<AType*>(buf + sizeof(AType) * blockDim.y / 2 * blockDim.x);
IType* count_buf = reinterpret_cast<IType*>(buf + sizeof(AType) * blockDim.y * blockDim.x);
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
mean_buf[idx] = mean;
sigma2_buf[idx] = sigma2;
count_buf[idx] = count;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
ChanMergePartition(mean_buf[idx], sigma2_buf[idx], count_buf[idx], mean, sigma2, count);
}
__syncthreads();
}
// Broadcast the result to all threads
if (threadIdx.y == 0) {
mean_buf[threadIdx.x] = mean;
sigma2_buf[threadIdx.x] = sigma2;
}
__syncthreads();
mean = mean_buf[threadIdx.x];
sigma2 = sigma2_buf[threadIdx.x] / nchannel;
} else {
sigma2 /= nchannel;
}
// Calculate the out_data: gamma * (x - mean) / sqrt(var + eps) + beta
AType std_eps = sqrt(sigma2 + eps);
AType invstd_eps = DType(1.0) / std_eps;
DType* out_col_val = out_data + bid * nchannel;
if (gamma != nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma == nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma != nullptr && beta == nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean));
}
} else {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean));
}
}
    // Write the mean_data and std_data
if (threadIdx.x == 0 && threadIdx.y == 0) {
mean_data[bid] = static_cast<DType>(mean);
std_data[bid] = static_cast<DType>(std_eps);
}
}
}
template<bool safe_acc = false>
void LayerNormGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 3U);
mxnet::TShape data_shape(2, 0);
mxnet::TShape mean_shape(1, 0);
size_t in_ndim = inputs[layernorm::kData].ndim();
data_shape[0] = mean_shape[0] = inputs[layernorm::kData].shape_.ProdShape(0, in_ndim - 1);
data_shape[1] = inputs[layernorm::kData].shape_[in_ndim - 1];
const TBlob in_data = inputs[layernorm::kData].reshape(data_shape);
const TBlob gamma = inputs[layernorm::kGamma];
const TBlob beta = inputs[layernorm::kBeta];
const TBlob out_data = outputs[layernorm::kOut].reshape(data_shape);
const TBlob mean_data = outputs[layernorm::kMean].reshape(mean_shape);
const TBlob std_data = outputs[layernorm::kStd].reshape(mean_shape);
// Make sure the inputs are contiguous
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(beta.CheckContiguous(), true);
CHECK_EQ(out_data.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
  // Launch the kernel. The dynamic shared memory size is
  // sizeof(AType) * blockDim.y * blockDim.x + sizeof(int) * blockDim.y / 2 * blockDim.x
int nbatch = data_shape[0];
int nchannel = data_shape[1];
float eps = param.eps;
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
int nthread_y;
const dim3 dimGrid(ngrid_x, ngrid_y);
if (nchannel <= 128) {
nthread_y = 1;
} else if (nchannel <= 512) {
nthread_y = 2;
} else {
nthread_y = 4;
}
hipStream_t stream = Stream<gpu>::GetStream(ctx.get_stream<gpu>());
const dim3 dimBlock(32, nthread_y);
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = nthread_y > 1 ? nthread_y * 32 * sizeof(AType)
+ (nthread_y / 2) * 32 * sizeof(int) : 0;
CheckLaunchParam(dimGrid, dimBlock);
hipLaunchKernelGGL(( LayerNormFusedForwardKernelContig<AType, DType, int>) , dim3(dimGrid), dim3(dimBlock), nshared, stream,
nbatch, nchannel, static_cast<AType>(eps),
in_data.dptr<DType>(), gamma.dptr<DType>(), beta.dptr<DType>(),
out_data.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>());
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedForwardKernelContig);
}
template<>
void LayerNormCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
if (req[0] == kNullOp) return;
CHECK_NE(req[0], kAddTo);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Try to use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for LayerNorm with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
if (safe_acc) {
return LayerNormGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
/* Fused CUDA kernel for calculating the gradient w.r.t gamma/beta in LayerNorm when axis=-1
* (Contiguous case).
* The gradient of gamma and beta are:
* d_gamma = sum(out_grad * (x - mean) / std, axis=0)
* d_beta = sum(out_grad, axis=0)
*
* We compute the gradient (mainly reduction over a non-contiguous axis) using two steps to
* improve the parallelism.
*
* In the first step, we divide the rows uniformly into K parts. K independent threadblocks are used
* to calculate the partial reduction result of each part. Illustrated below:
*
* 1st Block 2nd Block 3rd Block k-th Block
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* part_gamma[0] part_gamma[1] part_gamma[2] part_gamma[k-1]
* part_beta[0] part_beta[1] part_beta[2] part_beta[k-1]
*
*
* In the second step, we sum up the row-values in part_gamma and part_beta.
*
* This `LayerNormFusedBackwardKernel_PartGammaBeta` function implements the first step and
* `LayerNormFusedBackwardKernel_GammaBeta` implements the second step.
*/
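/* Example (illustrative, not from the original source): with nbatch = 4096 and
 * npart = 16 (the value fixed by GetGammaBetaGradKernelParams below), each
 * step-one block reduces (4096 + 15) / 16 = 256 rows, and part_gamma_grad /
 * part_beta_grad each have shape (npart, nchannel); step two sums them over
 * the npart axis.
 */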
template<typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_PartGammaBeta(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
AType* __restrict__ part_gamma_grad,
AType* __restrict__ part_beta_grad) {
extern __shared__ char buf[];
AType* d_buf = reinterpret_cast<AType*>(buf);
const int npart = gridDim.y;
const int block_row_num = (nbatch + npart - 1) / npart;
// The rows are divided into `npart` parts. Each threadblock calculates the reduction result
// within the corresponding row ranges.
int row_stride = blockDim.x + 1;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
int r_begin = blockIdx.y * block_row_num;
int r_end = min((blockIdx.y + 1) * block_row_num, nbatch);
AType* buf_gamma_grad = d_buf;
AType* buf_beta_grad = d_buf + blockDim.y * row_stride;
AType local_gamma_grad = 0;
AType local_beta_grad = 0;
if (c < nchannel) {
for (int r_b = r_begin; r_b < r_end; r_b += blockDim.y) {
int r = r_b + threadIdx.y;
if (r < r_end) {
AType local_mean = static_cast<AType>(mean_data[r]);
AType local_std = static_cast<AType>(std_data[r]);
int read_idx = r * nchannel + c;
AType local_in_data = static_cast<AType>(in_data[read_idx]);
AType local_out_grad = static_cast<AType>(out_grad[read_idx]);
local_gamma_grad += (local_in_data - local_mean) / local_std * local_out_grad;
local_beta_grad += local_out_grad;
}
}
}
buf_gamma_grad[threadIdx.y * row_stride + threadIdx.x] = local_gamma_grad;
buf_beta_grad[threadIdx.y * row_stride + threadIdx.x] = local_beta_grad;
__syncthreads();
for (int offset = blockDim.y/2; offset > 1; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = threadIdx.y * row_stride + threadIdx.x;
int idx2 = (threadIdx.y + offset) * row_stride + threadIdx.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
if (threadIdx.y == 0 && c < nchannel) {
part_gamma_grad[blockIdx.y * nchannel + c] = buf_gamma_grad[threadIdx.x]
+ buf_gamma_grad[threadIdx.x + row_stride];
part_beta_grad[blockIdx.y * nchannel + c] = buf_beta_grad[threadIdx.x]
+ buf_beta_grad[threadIdx.x + row_stride];
}
}
template<bool gamma_addto, bool beta_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_GammaBeta(const int nbatch,
const int nchannel,
const int npart,
const AType* __restrict__ part_gamma_grad,
const AType* __restrict__ part_beta_grad,
DType* gamma_grad,
DType* beta_grad) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (c < nchannel) {
extern __shared__ char buf[];
AType* buf_gamma_grad = reinterpret_cast<AType*>(buf);
AType* buf_beta_grad = reinterpret_cast<AType*>(buf) + blockDim.x * blockDim.y;
buf_gamma_grad[tid] = 0;
buf_beta_grad[tid] = 0;
for (int r = threadIdx.y; r < npart; r += blockDim.y) {
buf_gamma_grad[tid] += part_gamma_grad[r * nchannel + c];
buf_beta_grad[tid] += part_beta_grad[r * nchannel + c];
}
__syncthreads();
// Begin for inter-warp reduce
if (npart > 1) {
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = tid;
int idx2 = tid + offset * blockDim.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
}
if (threadIdx.y == 0) {
if (gamma_grad) {
if (gamma_addto) {
gamma_grad[c] += static_cast<DType>(buf_gamma_grad[threadIdx.x]);
} else {
gamma_grad[c] = static_cast<DType>(buf_gamma_grad[threadIdx.x]);
}
}
if (beta_grad) {
if (beta_addto) {
beta_grad[c] += static_cast<DType>(buf_beta_grad[threadIdx.x]);
} else {
beta_grad[c] = static_cast<DType>(buf_beta_grad[threadIdx.x]);
}
}
}
}
}
/* Fused CUDA kernel for calculating the gradient w.r.t. the data in LayerNorm
 * when axis=-1 (contiguous case). For every row it first reduces
 *   sum_val0 = mean(out_grad * gamma / std, axis=-1)
 *   sum_val1 = mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
 * and then writes
 *   data_grad = out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1
 */
template<int LOAD_UNROLL, bool data_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_Data(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
const DType* __restrict__ gamma,
DType* data_grad) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int nthread = blockDim.x * blockDim.y;
if (bid < nbatch) {
// Shared memory with size blockDim.y * blockDim.x * sizeof(DType)
extern __shared__ char buf[];
int tid = threadIdx.x + threadIdx.y * blockDim.x;
// 1. Calculate: mean(out_grad * gamma / std, axis=-1)
// mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType sum_val0 = 0; // Stores mean(out_grad * gamma / std, axis=-1)
AType sum_val1 = 0; // Stores mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType mean = static_cast<AType>(mean_data[bid]);
AType invstd_eps = AType(1) / static_cast<AType>(std_data[bid]);
int l = LOAD_UNROLL * tid;
for (; l + LOAD_UNROLL - 1 < nchannel; l += nthread * LOAD_UNROLL) {
#pragma unroll
for (int i = 0; i < LOAD_UNROLL; ++i) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l + i]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l + i]);
AType ele_gamma = static_cast<AType>(gamma[l + i]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
}
for (; l < nchannel; ++l) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
// Intra-warp reduction (all-reduce)
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
sum_val0 += warp_shfl_xor(sum_val0, mask);
sum_val1 += warp_shfl_xor(sum_val1, mask);
}
// Inter-warp reduction (all-reduce)
if (blockDim.y > 1) {
AType* sum_val0_buf = reinterpret_cast<AType*>(buf);
AType* sum_val1_buf =
reinterpret_cast<AType*>(buf + blockDim.y / 2 * blockDim.x * sizeof(AType));
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
sum_val0_buf[idx] = sum_val0;
sum_val1_buf[idx] = sum_val1;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
sum_val0 += sum_val0_buf[idx];
sum_val1 += sum_val1_buf[idx];
}
__syncthreads();
}
if (threadIdx.y == 0) {
sum_val0_buf[threadIdx.x] = sum_val0;
sum_val1_buf[threadIdx.x] = sum_val1;
}
__syncthreads();
sum_val0 = sum_val0_buf[threadIdx.x];
sum_val1 = sum_val1_buf[threadIdx.x];
}
sum_val0 /= nchannel;
sum_val1 /= nchannel;
// 2. Calculate the gradient as
// out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1
for (int l = tid; l < nchannel; l += nthread) {
AType ele_out_grad = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
if (data_addto) {
data_grad[bid * nchannel + l] +=
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps
- sum_val0 - (ele_x - mean) * invstd_eps * sum_val1);
} else {
data_grad[bid * nchannel + l] =
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0
- (ele_x - mean) * invstd_eps * sum_val1);
}
}
}
}
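/* Choose the launch configuration for the two-step gamma/beta gradient
 * reduction: the rows are split into a fixed npart = 16 partitions, step one
 * uses 32x16 thread blocks tiling (channels x partitions), and step two uses
 * ceil(nchannel / 32) blocks of 32x16 threads to sum the partial results.
 */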
void GetGammaBetaGradKernelParams(const int nbatch, const int nchannel,
dim3* part_grad_block_dim, dim3* part_grad_grid_dim,
dim3* gb_block_dim, dim3* gb_grid_dim,
int* npart) {
*npart = 16;
*part_grad_block_dim = dim3(32, 16);
*part_grad_grid_dim = dim3((nchannel + 32 - 1) / 32, *npart);
*gb_block_dim = dim3(32, *npart);
*gb_grid_dim = dim3((nchannel + 32 - 1) / 32);
CheckLaunchParam(*part_grad_grid_dim, *part_grad_block_dim);
CheckLaunchParam(*gb_grid_dim, *gb_block_dim);
}
template<bool safe_acc = false>
void LayerNormGradGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 5U);
const TBlob out_grad = inputs[0];
const TBlob in_data = inputs[1];
const TBlob gamma = inputs[2];
const TBlob mean_data = inputs[3];
const TBlob std_data = inputs[4];
const TBlob data_grad = outputs[0];
const TBlob gamma_grad = outputs[1];
const TBlob beta_grad = outputs[2];
// Make sure the inputs are contiguous
CHECK_EQ(out_grad.CheckContiguous(), true);
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
int nbatch = in_data.shape_.ProdShape(0, in_data.ndim() - 1);
int nchannel = in_data.shape_[in_data.ndim() - 1];
int data_grad_req = req[0];
int gamma_grad_req = req[1];
int beta_grad_req = req[2];
CHECK_NE(data_grad_req, kWriteInplace);
CHECK_NE(gamma_grad_req, kWriteInplace);
CHECK_NE(beta_grad_req, kWriteInplace);
Stream<gpu> *s = ctx.get_stream<gpu>();
hipStream_t stream = Stream<gpu>::GetStream(s);
// Calculate the gradient for gamma/beta
CHECK_EQ(gamma_grad.CheckContiguous(), true);
CHECK_EQ(beta_grad.CheckContiguous(), true);
dim3 part_grad_block_dim, part_grad_grid_dim, gb_block_dim, gb_grid_dim;
int npart;
GetGammaBetaGradKernelParams(nbatch, nchannel, &part_grad_block_dim, &part_grad_grid_dim,
&gb_block_dim, &gb_grid_dim, &npart);
if (gamma_grad_req != kNullOp || beta_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
Tensor<gpu, 1, AType> workspace =
ctx.requested[0].get_space_typed<gpu, 1, AType>(Shape1(2 * npart * nchannel), s);
AType* part_gamma_grad_ptr = workspace.dptr_;
AType* part_beta_grad_ptr = workspace.dptr_ + npart * nchannel;
const int nshared_K1 = 2 * (part_grad_block_dim.x + 1)
* part_grad_block_dim.y * sizeof(AType);
const int nshared_K2 = 2 * gb_block_dim.x * gb_block_dim.y * sizeof(AType);
DType* gamma_grad_ptr = (gamma_grad_req != kNullOp) ? gamma_grad.dptr<DType>() : nullptr;
DType* beta_grad_ptr = (beta_grad_req != kNullOp) ? beta_grad.dptr<DType>() : nullptr;
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_PartGammaBeta)
, dim3(part_grad_grid_dim), dim3(part_grad_block_dim), nshared_K1, stream,
nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(),
mean_data.dptr<DType>(), std_data.dptr<DType>(), part_gamma_grad_ptr, part_beta_grad_ptr);
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_PartGammaBeta);
if (gamma_grad_req == kAddTo && beta_grad_req != kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<true, false>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req != kAddTo && beta_grad_req == kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<false, true>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req == kAddTo && beta_grad_req == kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<true, true>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<false, false>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_GammaBeta);
}
// Calculate the gradient for data
CHECK_EQ(data_grad.CheckContiguous(), true);
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
const dim3 data_grid_dim(ngrid_x, ngrid_y);
int nthread_y;
if (nchannel <= 32) {
nthread_y = 1;
} else if (nchannel <= 128) {
nthread_y = 2;
} else if (nchannel <= 512) {
nthread_y = 4;
} else {
nthread_y = 8;
}
const dim3 data_block_dim(32, nthread_y);
const int LOAD_UNROLL = 4;
if (data_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = data_block_dim.y > 1 ? data_block_dim.y * data_block_dim.x * sizeof(AType) : 0;
CheckLaunchParam(data_grid_dim, data_block_dim);
if (data_grad_req == kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, true, AType>)
, dim3(data_grid_dim), dim3(data_block_dim), nshared, stream,
nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
} else {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, false, AType>)
, dim3(data_grid_dim), dim3(data_block_dim), nshared, stream,
nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_Data);
}
}
template<>
void LayerNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (safe_acc) {
return LayerNormGradGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGradGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormGradComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
NNVM_REGISTER_OP(LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormCompute<gpu>);
NNVM_REGISTER_OP(_backward_LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormGradCompute<gpu>);
} // namespace op
} // namespace mxnet
| b5e5468e7f4a876f63becca6eb7032d09bf63b0c.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file layer_norm.cu
* \brief Implements Ba et. al, Layer Normalization (https://arxiv.org/abs/1607.06450).
*/
#include "./layer_norm-inl.h"
using namespace mshadow::cuda;
namespace mxnet {
namespace op {
template <>
void LayerNormGradComputeGeneralImpl<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const TBlob& gamma,
const TBlob& mean,
const TBlob& std,
const TBlob& normalized_data,
const TBlob& ograd_mult,
const TBlob& red_out,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const mshadow::Tensor<gpu, 1, char>& workspace,
const mxnet::TShape& red_dst_shape,
const mxnet::TShape& red_src_shape,
const mxnet::TShape& red_exclude_dst_shape,
const mxnet::TShape& red_exclude_src_shape,
const int channel_size) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<gpu> *s = ctx.get_stream<gpu>();
// Compute normalized_data = (data - mean) / std
BinaryBroadcastRTCCompute {"sub"}(attrs, ctx,
{data, mean},
{kWriteTo}, {normalized_data});
BinaryBroadcastRTCCompute {"div"}(attrs, ctx,
{normalized_data, std},
{kWriteTo}, {normalized_data});
// Calculate grad_beta
if (req[2] != kNullOp) {
BROADCAST_NDIM_SWITCH(red_exclude_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, outputs[2].reshape(red_exclude_dst_shape), req[2], workspace,
ograd.reshape(red_exclude_src_shape), "red::sum{}", NDim, "identity");
});
}
// Calculate grad_gamma, it will be sum(ograd * normalized_data, exclude_axis)
ElemwiseBinaryRTCCompute {"mul"}(attrs, ctx, {normalized_data, ograd},
{kWriteTo}, {ograd_mult});
if (req[1] != kNullOp) {
BROADCAST_NDIM_SWITCH(red_exclude_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, outputs[1].reshape(red_exclude_dst_shape), req[1], workspace,
ograd_mult.reshape(red_exclude_src_shape), "red::sum{}", NDim,
"identity");
});
}
// Calculate grad_data:
// ograd_mult = ograd * gamma / std
// grad_data = ograd_mult - mean(ograd_mult, axis)
// + normalized_data * (-mean(normalized_data * ograd_mult, axis))
if (req[0] != kNullOp) {
BinaryBroadcastRTCCompute {"mul"}(attrs, ctx,
{ograd, gamma},
{kWriteTo}, {ograd_mult});
BinaryBroadcastRTCCompute {"div"}(attrs, ctx,
{ograd_mult, std},
{kWriteTo}, {ograd_mult});
BROADCAST_NDIM_SWITCH(red_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, red_out.reshape(red_dst_shape), kWriteTo, workspace,
ograd_mult.reshape(red_src_shape), "red::sum{}", NDim, "identity");
});
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<gpu, 1, DType> red_out_tensor = red_out.FlatTo1D<gpu, DType>(s);
red_out_tensor /= scalar<DType>(channel_size);
});
BinaryBroadcastRTCCompute {"sub"}(attrs, ctx,
{ograd_mult, red_out},
{req[0]}, {outputs[0]});
ElemwiseBinaryRTCCompute {"mul"}(attrs, ctx, {ograd_mult, normalized_data},
{kWriteTo}, {ograd_mult});
BROADCAST_NDIM_SWITCH(red_dst_shape.ndim(), NDim, {
broadcast::RTCReduce(ctx, red_out.reshape(red_dst_shape), kWriteTo, workspace,
ograd_mult.reshape(red_src_shape), "red::sum{}", NDim, "identity");
});
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<gpu, 1, DType> red_out_tensor = red_out.FlatTo1D<gpu, DType>(s);
red_out_tensor /= scalar<DType>(- channel_size);
});
BinaryBroadcastRTCCompute {"mul"}(attrs, ctx,
{normalized_data, red_out},
{kAddTo}, {outputs[0]});
}
}
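// Thin wrappers around the warp shuffle intrinsics: __shfl_sync / __shfl_xor_sync
// on CUDA >= 9.0 and the legacy __shfl / __shfl_xor otherwise. They are used for
// the intra-warp reductions in the kernels below.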
template <typename DType>
__device__ __forceinline__ DType warp_shfl(DType value, int src_lane,
int width = 32, unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
return __shfl_sync(mask, value, src_lane, width);
#else
return __shfl(value, src_lane, width);
#endif
}
template <typename DType>
__device__ __forceinline__ DType warp_shfl_xor(DType value, int laneMask,
int width = 32, unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
/* A single updating step of the Welford's online algorithm to calculate the mean and variance.
* The value 'curr' will be accumulated to the (mean, sigma2, count) triplet.
*
*/
template<typename DType, typename IType>
__device__ __forceinline__ void StepWelfordOnlineSum(const DType curr,
DType& mean, //NOLINT
DType& sigma2, //NOLINT
IType& count) { //NOLINT
count += IType(1);
DType delta = curr - mean;
mean += delta / count;
sigma2 += delta * (curr - mean);
}
/* Merge the mean/variance of two partitions. It's the key step of the Chan's parallel algorithm.
* The (lhs_mean, lhs_sigma2, lhs_count) will be merged into (rhs_mean, rhs_sigma2, rhs_count)
*
* See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance for more details.
*
* TODO(sxjscience) Explore the possibility of int lhs_count and rhs_count
*/
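/* Concretely (illustrative restatement of the code below): with nA = lhs_count
 * and nB = rhs_count, the merge computes
 *   n = nA + nB
 *   mean = (nA * lhs_mean + nB * rhs_mean) / n
 *   sigma2 = lhs_sigma2 + rhs_sigma2 + (rhs_mean - lhs_mean)^2 * nA * nB / n
 * which is the parallel variance update from the reference above.
 */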
template<typename DType, typename IType>
__device__ __inline__ void ChanMergePartition(const DType lhs_mean,
const DType lhs_sigma2,
const IType lhs_count,
DType& rhs_mean, //NOLINT
DType& rhs_sigma2, //NOLINT
IType& rhs_count) { //NOLINT
DType delta = rhs_mean - lhs_mean;
DType nA = static_cast<DType>(lhs_count);
DType nB = static_cast<DType>(rhs_count);
rhs_count = nA + nB;
if (rhs_count > DType(0)) {
nA = nA / rhs_count;
nB = nB / rhs_count;
rhs_mean = nA * lhs_mean + nB * rhs_mean;
rhs_sigma2 = rhs_sigma2 + lhs_sigma2 + delta * delta * nA * nB * rhs_count;
} else {
rhs_mean = DType(0);
rhs_sigma2 = DType(0);
}
}
/* Split the input column into multiple partitions and compute the mean/sigma of each partition.
* Each thread will keep a mean/sigma2. The mean/sigma2 can be further merged to get the mean and
* sigma2 of the column.
*/
template<typename AType, typename DType, typename IType>
__device__ __forceinline__ void BlockWelfordOnlineSum(const DType* __restrict__ col_vals,
const int nchannel,
AType& mean, //NOLINT
AType& sigma2, //NOLINT
IType& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// Each thread takes charge of 4 consecutive numbers. This should optimize the loading speed using
// vectorized types like float4.
// Also, to minimize branch divergence, we split the for-loop into two parts.
int l = 4 * tid;
for (; l + 3 < nchannel; l += 4 * nthread) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l + i]), mean, sigma2, count);
}
}
for (; l < nchannel; ++l) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l]), mean, sigma2, count);
}
}
template<>
__device__ __forceinline__
void BlockWelfordOnlineSum<float, mshadow::half::half_t, int>
(const mshadow::half::half_t* __restrict__ col_vals,
const int nchannel,
float& mean, //NOLINT
float& sigma2, //NOLINT
int& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// We cast the input half pointer to half2 to optimize the loading speed.
// Here, we need to notice that CUDA forces memory alignment, i.e.,
// ASSERT static_cast<size_t>(ptr) % sizeof(dtype) == 0.
// Thus, we need to shift the address of the half pointer to be aligned by half2.
int align_shift = (reinterpret_cast<size_t>(col_vals) % 4) != 0;
int padding = (nchannel - align_shift) % 2;
int half2_size = (nchannel - align_shift) / 2;
const __half2* half2_col_vals = reinterpret_cast<const __half2*>(col_vals + align_shift);
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (align_shift) {
StepWelfordOnlineSum(__half2float(col_vals[0].cuhalf_), mean, sigma2, count);
}
if (padding) {
StepWelfordOnlineSum(__half2float(col_vals[nchannel - 1].cuhalf_), mean, sigma2, count);
}
}
for (int l = tid; l < half2_size; l += nthread) {
float2 ele_val = __half22float2(half2_col_vals[l]);
StepWelfordOnlineSum(ele_val.x, mean, sigma2, count);
StepWelfordOnlineSum(ele_val.y, mean, sigma2, count);
}
}
/* Fused CUDA kernel for the forward pass of layer normalization.
* It computes the LayerNorm when axis=-1, i.e., contiguous reduction scenario.
* Shape of the input tensors:
* in_data = (nbatch, nchannel)
* gamma = (nchannel,)
* beta = (nchannel,)
 * out_data = (nbatch, nchannel)
 * mean_data = (nbatch,)
 * std_data = (nbatch,)
 * It is always launched with blockDim.x = WARP_SIZE; blockDim.y is chosen by the caller.
 * Also, when blockDim.y > 1, it requires dynamic shared memory of size:
 *   sizeof(AType) * blockDim.y * blockDim.x + sizeof(int) * blockDim.y / 2 * blockDim.x
*/
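/* Example (illustrative, not from the original source): with blockDim = (32, 4)
 * and AType = float, the host launcher below requests
 *   4 * 32 * sizeof(float) + 2 * 32 * sizeof(int) = 768 bytes
 * of dynamic shared memory.
 */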
template<typename AType, typename DType, typename IType>
__global__ void LayerNormFusedForwardKernelContig(const int nbatch,
const int nchannel,
const AType eps,
const DType* __restrict__ in_data,
const DType* __restrict__ gamma,
const DType* __restrict__ beta,
DType* __restrict__ out_data,
DType* __restrict__ mean_data,
DType* __restrict__ std_data) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int nthread = blockDim.x * blockDim.y;
IType count = 0;
AType mean = 0;
AType sigma2 = 0;
if (bid < nbatch) {
extern __shared__ char buf[]; // Shared memory
const DType* col_vals = in_data + bid * nchannel;
BlockWelfordOnlineSum(col_vals, nchannel, mean, sigma2, count);
// Merge the mean/sigma2 within a warp
// Use the Chan's Parallel Algorithm to merge all (mean, sigma2, counts)
// within a warp of threads.
    // After this loop, the thread with threadIdx.x == 0 holds the aggregated
    // (mean, sigma2, count) for its warp.
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
AType meanB = warp_shfl_xor(mean, mask);
AType sigma2B = warp_shfl_xor(sigma2, mask);
IType countB = warp_shfl_xor(count, mask);
ChanMergePartition(meanB, sigma2B, countB, mean, sigma2, count);
}
if (blockDim.y > 1) {
// Inter-warp reduction. Copy the upper-half of the warps to shared memory
// and merge with the lower-half warp
AType* mean_buf = reinterpret_cast<AType*>(buf);
AType* sigma2_buf =
reinterpret_cast<AType*>(buf + sizeof(AType) * blockDim.y / 2 * blockDim.x);
IType* count_buf = reinterpret_cast<IType*>(buf + sizeof(AType) * blockDim.y * blockDim.x);
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
mean_buf[idx] = mean;
sigma2_buf[idx] = sigma2;
count_buf[idx] = count;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
ChanMergePartition(mean_buf[idx], sigma2_buf[idx], count_buf[idx], mean, sigma2, count);
}
__syncthreads();
}
// Broadcast the result to all threads
if (threadIdx.y == 0) {
mean_buf[threadIdx.x] = mean;
sigma2_buf[threadIdx.x] = sigma2;
}
__syncthreads();
mean = mean_buf[threadIdx.x];
sigma2 = sigma2_buf[threadIdx.x] / nchannel;
} else {
sigma2 /= nchannel;
}
// Calculate the out_data: gamma * (x - mean) / sqrt(var + eps) + beta
AType std_eps = sqrt(sigma2 + eps);
AType invstd_eps = DType(1.0) / std_eps;
DType* out_col_val = out_data + bid * nchannel;
if (gamma != nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma == nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma != nullptr && beta == nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean));
}
} else {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean));
}
}
    // Write the mean_data and std_data
if (threadIdx.x == 0 && threadIdx.y == 0) {
mean_data[bid] = static_cast<DType>(mean);
std_data[bid] = static_cast<DType>(std_eps);
}
}
}
template<bool safe_acc = false>
void LayerNormGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 3U);
mxnet::TShape data_shape(2, 0);
mxnet::TShape mean_shape(1, 0);
size_t in_ndim = inputs[layernorm::kData].ndim();
data_shape[0] = mean_shape[0] = inputs[layernorm::kData].shape_.ProdShape(0, in_ndim - 1);
data_shape[1] = inputs[layernorm::kData].shape_[in_ndim - 1];
const TBlob in_data = inputs[layernorm::kData].reshape(data_shape);
const TBlob gamma = inputs[layernorm::kGamma];
const TBlob beta = inputs[layernorm::kBeta];
const TBlob out_data = outputs[layernorm::kOut].reshape(data_shape);
const TBlob mean_data = outputs[layernorm::kMean].reshape(mean_shape);
const TBlob std_data = outputs[layernorm::kStd].reshape(mean_shape);
// Make sure the inputs are contiguous
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(beta.CheckContiguous(), true);
CHECK_EQ(out_data.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
  // Launch the kernel. The dynamic shared memory size is
  // sizeof(AType) * blockDim.y * blockDim.x + sizeof(int) * blockDim.y / 2 * blockDim.x
int nbatch = data_shape[0];
int nchannel = data_shape[1];
float eps = param.eps;
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
int nthread_y;
const dim3 dimGrid(ngrid_x, ngrid_y);
if (nchannel <= 128) {
nthread_y = 1;
} else if (nchannel <= 512) {
nthread_y = 2;
} else {
nthread_y = 4;
}
cudaStream_t stream = Stream<gpu>::GetStream(ctx.get_stream<gpu>());
const dim3 dimBlock(32, nthread_y);
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = nthread_y > 1 ? nthread_y * 32 * sizeof(AType)
+ (nthread_y / 2) * 32 * sizeof(int) : 0;
CheckLaunchParam(dimGrid, dimBlock);
LayerNormFusedForwardKernelContig<AType, DType, int> <<<dimGrid, dimBlock, nshared, stream>>>
(nbatch, nchannel, static_cast<AType>(eps),
in_data.dptr<DType>(), gamma.dptr<DType>(), beta.dptr<DType>(),
out_data.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>());
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedForwardKernelContig);
}
template<>
void LayerNormCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
if (req[0] == kNullOp) return;
CHECK_NE(req[0], kAddTo);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Try to use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for LayerNorm with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
if (safe_acc) {
return LayerNormGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
/* Fused CUDA kernel for calculating the gradient w.r.t gamma/beta in LayerNorm when axis=-1
* (Contiguous case).
* The gradient of gamma and beta are:
* d_gamma = sum(out_grad * (x - mean) / std, axis=0)
* d_beta = sum(out_grad, axis=0)
*
* We compute the gradient (mainly reduction over a non-contiguous axis) using two steps to
* improve the parallelism.
*
* In the first step, we divide the rows uniformly into K parts. K independent threadblocks are used
* to calculate the partial reduction result of each part. Illustrated below:
*
* 1st Block 2nd Block 3rd Block k-th Block
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* part_gamma[0] part_gamma[1] part_gamma[2] part_gamma[k-1]
* part_beta[0] part_beta[1] part_beta[2] part_beta[k-1]
*
*
* In the second step, we sum up the row-values in part_gamma and part_beta.
*
* This `LayerNormFusedBackwardKernel_PartGammaBeta` function implements the first step and
* `LayerNormFusedBackwardKernel_GammaBeta` implements the second step.
*/
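/* Example (illustrative, not from the original source): with nbatch = 4096 and
 * npart = 16 (the value fixed by GetGammaBetaGradKernelParams below), each
 * step-one block reduces (4096 + 15) / 16 = 256 rows, and part_gamma_grad /
 * part_beta_grad each have shape (npart, nchannel); step two sums them over
 * the npart axis.
 */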
template<typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_PartGammaBeta(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
AType* __restrict__ part_gamma_grad,
AType* __restrict__ part_beta_grad) {
extern __shared__ char buf[];
AType* d_buf = reinterpret_cast<AType*>(buf);
const int npart = gridDim.y;
const int block_row_num = (nbatch + npart - 1) / npart;
// The rows are divided into `npart` parts. Each threadblock calculates the reduction result
// within the corresponding row ranges.
int row_stride = blockDim.x + 1;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
int r_begin = blockIdx.y * block_row_num;
int r_end = min((blockIdx.y + 1) * block_row_num, nbatch);
AType* buf_gamma_grad = d_buf;
AType* buf_beta_grad = d_buf + blockDim.y * row_stride;
AType local_gamma_grad = 0;
AType local_beta_grad = 0;
if (c < nchannel) {
for (int r_b = r_begin; r_b < r_end; r_b += blockDim.y) {
int r = r_b + threadIdx.y;
if (r < r_end) {
AType local_mean = static_cast<AType>(mean_data[r]);
AType local_std = static_cast<AType>(std_data[r]);
int read_idx = r * nchannel + c;
AType local_in_data = static_cast<AType>(in_data[read_idx]);
AType local_out_grad = static_cast<AType>(out_grad[read_idx]);
local_gamma_grad += (local_in_data - local_mean) / local_std * local_out_grad;
local_beta_grad += local_out_grad;
}
}
}
buf_gamma_grad[threadIdx.y * row_stride + threadIdx.x] = local_gamma_grad;
buf_beta_grad[threadIdx.y * row_stride + threadIdx.x] = local_beta_grad;
__syncthreads();
for (int offset = blockDim.y/2; offset > 1; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = threadIdx.y * row_stride + threadIdx.x;
int idx2 = (threadIdx.y + offset) * row_stride + threadIdx.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
if (threadIdx.y == 0 && c < nchannel) {
part_gamma_grad[blockIdx.y * nchannel + c] = buf_gamma_grad[threadIdx.x]
+ buf_gamma_grad[threadIdx.x + row_stride];
part_beta_grad[blockIdx.y * nchannel + c] = buf_beta_grad[threadIdx.x]
+ buf_beta_grad[threadIdx.x + row_stride];
}
}
template<bool gamma_addto, bool beta_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_GammaBeta(const int nbatch,
const int nchannel,
const int npart,
const AType* __restrict__ part_gamma_grad,
const AType* __restrict__ part_beta_grad,
DType* gamma_grad,
DType* beta_grad) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (c < nchannel) {
extern __shared__ char buf[];
AType* buf_gamma_grad = reinterpret_cast<AType*>(buf);
AType* buf_beta_grad = reinterpret_cast<AType*>(buf) + blockDim.x * blockDim.y;
buf_gamma_grad[tid] = 0;
buf_beta_grad[tid] = 0;
for (int r = threadIdx.y; r < npart; r += blockDim.y) {
buf_gamma_grad[tid] += part_gamma_grad[r * nchannel + c];
buf_beta_grad[tid] += part_beta_grad[r * nchannel + c];
}
__syncthreads();
// Begin for inter-warp reduce
if (npart > 1) {
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = tid;
int idx2 = tid + offset * blockDim.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
}
if (threadIdx.y == 0) {
if (gamma_grad) {
if (gamma_addto) {
gamma_grad[c] += static_cast<DType>(buf_gamma_grad[threadIdx.x]);
} else {
gamma_grad[c] = static_cast<DType>(buf_gamma_grad[threadIdx.x]);
}
}
if (beta_grad) {
if (beta_addto) {
beta_grad[c] += static_cast<DType>(buf_beta_grad[threadIdx.x]);
} else {
beta_grad[c] = static_cast<DType>(buf_beta_grad[threadIdx.x]);
}
}
}
}
}
/* Fused CUDA kernel for calculating the gradient w.r.t. the data in LayerNorm
 * when axis=-1 (contiguous case). For every row it first reduces
 *   sum_val0 = mean(out_grad * gamma / std, axis=-1)
 *   sum_val1 = mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
 * and then writes
 *   data_grad = out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1
 */
template<int LOAD_UNROLL, bool data_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_Data(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
const DType* __restrict__ gamma,
DType* data_grad) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int nthread = blockDim.x * blockDim.y;
if (bid < nbatch) {
// Shared memory with size blockDim.y * blockDim.x * sizeof(DType)
extern __shared__ char buf[];
int tid = threadIdx.x + threadIdx.y * blockDim.x;
// 1. Calculate: mean(out_grad * gamma / std, axis=-1)
// mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType sum_val0 = 0; // Stores mean(out_grad * gamma / std, axis=-1)
AType sum_val1 = 0; // Stores mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType mean = static_cast<AType>(mean_data[bid]);
AType invstd_eps = AType(1) / static_cast<AType>(std_data[bid]);
int l = LOAD_UNROLL * tid;
for (; l + LOAD_UNROLL - 1 < nchannel; l += nthread * LOAD_UNROLL) {
#pragma unroll
for (int i = 0; i < LOAD_UNROLL; ++i) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l + i]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l + i]);
AType ele_gamma = static_cast<AType>(gamma[l + i]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
}
for (; l < nchannel; ++l) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
// Intra-warp reduction (all-reduce)
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
sum_val0 += warp_shfl_xor(sum_val0, mask);
sum_val1 += warp_shfl_xor(sum_val1, mask);
}
// Inter-warp reduction (all-reduce)
if (blockDim.y > 1) {
AType* sum_val0_buf = reinterpret_cast<AType*>(buf);
AType* sum_val1_buf =
reinterpret_cast<AType*>(buf + blockDim.y / 2 * blockDim.x * sizeof(AType));
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
sum_val0_buf[idx] = sum_val0;
sum_val1_buf[idx] = sum_val1;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
sum_val0 += sum_val0_buf[idx];
sum_val1 += sum_val1_buf[idx];
}
__syncthreads();
}
if (threadIdx.y == 0) {
sum_val0_buf[threadIdx.x] = sum_val0;
sum_val1_buf[threadIdx.x] = sum_val1;
}
__syncthreads();
sum_val0 = sum_val0_buf[threadIdx.x];
sum_val1 = sum_val1_buf[threadIdx.x];
}
sum_val0 /= nchannel;
sum_val1 /= nchannel;
// 2. Calculate the gradient as
// out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1
for (int l = tid; l < nchannel; l += nthread) {
AType ele_out_grad = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
if (data_addto) {
data_grad[bid * nchannel + l] +=
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps
- sum_val0 - (ele_x - mean) * invstd_eps * sum_val1);
} else {
data_grad[bid * nchannel + l] =
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0
- (ele_x - mean) * invstd_eps * sum_val1);
}
}
}
}
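/* Choose the launch configuration for the two-step gamma/beta gradient
 * reduction: the rows are split into a fixed npart = 16 partitions, step one
 * uses 32x16 thread blocks tiling (channels x partitions), and step two uses
 * ceil(nchannel / 32) blocks of 32x16 threads to sum the partial results.
 */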
void GetGammaBetaGradKernelParams(const int nbatch, const int nchannel,
dim3* part_grad_block_dim, dim3* part_grad_grid_dim,
dim3* gb_block_dim, dim3* gb_grid_dim,
int* npart) {
*npart = 16;
*part_grad_block_dim = dim3(32, 16);
*part_grad_grid_dim = dim3((nchannel + 32 - 1) / 32, *npart);
*gb_block_dim = dim3(32, *npart);
*gb_grid_dim = dim3((nchannel + 32 - 1) / 32);
CheckLaunchParam(*part_grad_grid_dim, *part_grad_block_dim);
CheckLaunchParam(*gb_grid_dim, *gb_block_dim);
}
template<bool safe_acc = false>
void LayerNormGradGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 5U);
const TBlob out_grad = inputs[0];
const TBlob in_data = inputs[1];
const TBlob gamma = inputs[2];
const TBlob mean_data = inputs[3];
const TBlob std_data = inputs[4];
const TBlob data_grad = outputs[0];
const TBlob gamma_grad = outputs[1];
const TBlob beta_grad = outputs[2];
// Make sure the inputs are contiguous
CHECK_EQ(out_grad.CheckContiguous(), true);
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
int nbatch = in_data.shape_.ProdShape(0, in_data.ndim() - 1);
int nchannel = in_data.shape_[in_data.ndim() - 1];
int data_grad_req = req[0];
int gamma_grad_req = req[1];
int beta_grad_req = req[2];
CHECK_NE(data_grad_req, kWriteInplace);
CHECK_NE(gamma_grad_req, kWriteInplace);
CHECK_NE(beta_grad_req, kWriteInplace);
Stream<gpu> *s = ctx.get_stream<gpu>();
cudaStream_t stream = Stream<gpu>::GetStream(s);
// Calculate the gradient for gamma/beta
CHECK_EQ(gamma_grad.CheckContiguous(), true);
CHECK_EQ(beta_grad.CheckContiguous(), true);
dim3 part_grad_block_dim, part_grad_grid_dim, gb_block_dim, gb_grid_dim;
int npart;
GetGammaBetaGradKernelParams(nbatch, nchannel, &part_grad_block_dim, &part_grad_grid_dim,
&gb_block_dim, &gb_grid_dim, &npart);
if (gamma_grad_req != kNullOp || beta_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
Tensor<gpu, 1, AType> workspace =
ctx.requested[0].get_space_typed<gpu, 1, AType>(Shape1(2 * npart * nchannel), s);
AType* part_gamma_grad_ptr = workspace.dptr_;
AType* part_beta_grad_ptr = workspace.dptr_ + npart * nchannel;
const int nshared_K1 = 2 * (part_grad_block_dim.x + 1)
* part_grad_block_dim.y * sizeof(AType);
const int nshared_K2 = 2 * gb_block_dim.x * gb_block_dim.y * sizeof(AType);
DType* gamma_grad_ptr = (gamma_grad_req != kNullOp) ? gamma_grad.dptr<DType>() : nullptr;
DType* beta_grad_ptr = (beta_grad_req != kNullOp) ? beta_grad.dptr<DType>() : nullptr;
LayerNormFusedBackwardKernel_PartGammaBeta
<<<part_grad_grid_dim, part_grad_block_dim, nshared_K1, stream>>>
(nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(),
mean_data.dptr<DType>(), std_data.dptr<DType>(), part_gamma_grad_ptr, part_beta_grad_ptr);
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_PartGammaBeta);
if (gamma_grad_req == kAddTo && beta_grad_req != kAddTo) {
LayerNormFusedBackwardKernel_GammaBeta<true, false>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req != kAddTo && beta_grad_req == kAddTo) {
LayerNormFusedBackwardKernel_GammaBeta<false, true>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req == kAddTo && beta_grad_req == kAddTo) {
LayerNormFusedBackwardKernel_GammaBeta<true, true>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else {
LayerNormFusedBackwardKernel_GammaBeta<false, false>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_GammaBeta);
}
// Calculate the gradient for data
CHECK_EQ(data_grad.CheckContiguous(), true);
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
const dim3 data_grid_dim(ngrid_x, ngrid_y);
int nthread_y;
if (nchannel <= 32) {
nthread_y = 1;
} else if (nchannel <= 128) {
nthread_y = 2;
} else if (nchannel <= 512) {
nthread_y = 4;
} else {
nthread_y = 8;
}
const dim3 data_block_dim(32, nthread_y);
const int LOAD_UNROLL = 4;
if (data_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = data_block_dim.y > 1 ? data_block_dim.y * data_block_dim.x * sizeof(AType) : 0;
CheckLaunchParam(data_grid_dim, data_block_dim);
if (data_grad_req == kAddTo) {
LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, true, AType>
<<<data_grid_dim, data_block_dim, nshared, stream>>>
(nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
} else {
LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, false, AType>
<<<data_grid_dim, data_block_dim, nshared, stream>>>
(nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_Data);
}
}
template<>
void LayerNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (safe_acc) {
return LayerNormGradGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGradGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormGradComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
NNVM_REGISTER_OP(LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormCompute<gpu>);
NNVM_REGISTER_OP(_backward_LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormGradCompute<gpu>);
} // namespace op
} // namespace mxnet
|
92df0b41e34b19cdfa99ecdd74af62b42d181c59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
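// ROIPoolForward max-pools each region of interest into a fixed
// pooled_height x pooled_width grid. Each row of bottom_rois holds 5 values,
// [batch_index, x1, y1, x2, y2], given in input-image coordinates and mapped
// onto the feature map with spatial_scale; argmax_data records the index of
// the winning element of every bin for use in the backward pass.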
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
// const int CAFFE_CUDA_NUM_THREADS = 512;
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
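// Example of the feasible-set computation above, continuing the 6x6 /
// bin_size = 2 case: a bottom cell at h = 3 inside a ROI starting at
// roi_start_h = 2 gives phstart = floor((3-2)/2) = 0 and
// phend = ceil((3-2+1)/2) = 1, so only ph = 0 could have pooled it; the
// argmax comparison then decides whether any gradient actually flows.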
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
| 92df0b41e34b19cdfa99ecdd74af62b42d181c59.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
// const int CAFFE_CUDA_NUM_THREADS = 512;
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
15d197b861ff127ee1fec0b23fa1fe1dac16dcb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void hconv_bprop_C64_N64(
float* param_Sum,
unsigned short* param_O,
const unsigned short* param_I,
const unsigned short* param_F,
float param_alpha,
float param_beta,
int param_flags,
int param_offset_K,
int param_N,
int param_K,
int param_D,
int param_H,
int param_W,
int param_WN,
int param_HWN,
int param_DHWN,
int param_C,
int param_CRST,
int param_RST,
int param_RS,
int param_magic_RS,
int param_shift_RS,
int param_S,
int param_magic_S,
int param_shift_S,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_d,
int param_str_h,
int param_str_w,
int param_Q,
int param_PQ,
int param_QN,
int param_PQN,
int param_MPQN,
int param_magic_Q,
int param_shift_Q,
int param_magic_PQ,
int param_shift_PQ,
int param_R,
int param_T,
int param_magic_str_w,
int param_shift_str_w,
int param_magic_str_h,
int param_shift_str_h,
int param_magic_str_d,
int param_shift_str_d
) {
__shared__ float share[ 64*8*2 + 64*8*2 + 8];
*param_Sum = share[0];
}
| 15d197b861ff127ee1fec0b23fa1fe1dac16dcb7.cu |
extern "C" __global__ void hconv_bprop_C64_N64(
float* param_Sum,
unsigned short* param_O,
const unsigned short* param_I,
const unsigned short* param_F,
float param_alpha,
float param_beta,
int param_flags,
int param_offset_K,
int param_N,
int param_K,
int param_D,
int param_H,
int param_W,
int param_WN,
int param_HWN,
int param_DHWN,
int param_C,
int param_CRST,
int param_RST,
int param_RS,
int param_magic_RS,
int param_shift_RS,
int param_S,
int param_magic_S,
int param_shift_S,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_d,
int param_str_h,
int param_str_w,
int param_Q,
int param_PQ,
int param_QN,
int param_PQN,
int param_MPQN,
int param_magic_Q,
int param_shift_Q,
int param_magic_PQ,
int param_shift_PQ,
int param_R,
int param_T,
int param_magic_str_w,
int param_shift_str_w,
int param_magic_str_h,
int param_shift_str_h,
int param_magic_str_d,
int param_shift_str_d
) {
__shared__ float share[ 64*8*2 + 64*8*2 + 8];
*param_Sum = share[0];
}
|
e426f80244b00b17005318da6bd37ee9912eceb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THGeneral.h"
#include "THHGeneral.h"
#include "THHTensor.h"
#include <assert.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
// backward-compatible LDG
#if __CUDA_ARCH__ >= 350
#define LDG(x) (__ldg(x))
#else
#define LDG(x) (*(x))
#endif
// Maximum elements per thread that we will copy
#define ELEMENTS_PER_THREAD 8L
// Threads per thread block
#define THREADS_PER_BLOCK 32 * 4
// Maximum size per grid dimension that we assume (compute capability >= 2.0)
#define MAX_GRID_SIZE 65535L
// Maximum number of dimensions allowed for cutorch
#define MAX_DIMS 25
template <typename IndexType>
struct TensorInfo {
float* data;
IndexType sizes[MAX_DIMS];
IndexType strides[MAX_DIMS];
int dims;
};
// This function extracts size/stride information for the kernel.
// Successive dimensions can be collapsed if the size/strides match
// up and thus there are no holes between the dimensions. This is used
// to reduce the complexity of the problem.
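// Example: a contiguous float tensor of sizes (4, 1, 5) has strides
// (5, 5, 1). The size-1 middle dimension is skipped, and since
// stride(0) == stride(2) * size(2) == 5, dims 0 and 2 collapse into a
// single dimension of size 20 with stride 1, so the copy kernel can treat
// the tensor as 1-D.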
template <typename IndexType>
TensorInfo<IndexType>
THCudaTensor_computeTensorInfo(THCState *state, THCudaTensor* t) {
int dims = THCudaTensor_nDimension(state, t);
assert(dims <= MAX_DIMS);
TensorInfo<IndexType> info;
info.data = THCudaTensor_data(state, t);
// Count the number of successive dimensions that can be collapsed, from
// innermost to outermost.
int numCollapsed = 0;
// Find the innermost dimension not of size 1, since dimensions of size 1 are
// collapsible.
int firstNonOneDim = -1;
for (int i = dims - 1; i >= 0; --i) {
if (THCudaTensor_size(state, t, i) != 1) {
firstNonOneDim = i;
break;
}
}
// We guarantee that we are never called with only dimensions of size 1.
assert(firstNonOneDim >= 0);
// Skip the leading size 1 dims
numCollapsed += dims - 1 - firstNonOneDim;
// Now, to determine the other collapsible dims. These are the size/strides
// of the previous inner non-collapsible dim we encounter.
long sizeInner = THCudaTensor_size(state, t, firstNonOneDim);
long strideInner = THCudaTensor_stride(state, t, firstNonOneDim);
for (int i = firstNonOneDim - 1; i >= 0; --i) {
long sizeOuter = THCudaTensor_size(state, t, i);
long strideOuter = THCudaTensor_stride(state, t, i);
// The next outermost dimension can be skipped if size 1
if (sizeOuter == 1) {
++numCollapsed;
continue;
}
// If the next outermost dimension is contiguous with the
// previous non-collapsed one, collapse it
if (strideOuter == strideInner * sizeInner) {
++numCollapsed;
// This is the run of collapsed dimensions' size
sizeInner = sizeInner * sizeOuter;
continue;
}
// Otherwise, this new outer dimension at `i` cannot be collapsed
// and is different from the previous.
sizeInner = sizeOuter;
strideInner = strideOuter;
}
assert(numCollapsed < dims);
info.dims = dims - numCollapsed;
// Determine the sizes of the collapsed dimensions.
int collapsedIndex = dims - numCollapsed - 1;
info.sizes[collapsedIndex] = THCudaTensor_size(state, t, firstNonOneDim);
info.strides[collapsedIndex] = THCudaTensor_stride(state, t, firstNonOneDim);
for (int i = firstNonOneDim - 1; i >= 0; --i) {
long sizeOuter = THCudaTensor_size(state, t, i);
long strideOuter = THCudaTensor_stride(state, t, i);
if (sizeOuter == 1) {
// skip
continue;
}
if (strideOuter ==
info.sizes[collapsedIndex] * info.strides[collapsedIndex]) {
// collapse
info.sizes[collapsedIndex] *= sizeOuter;
continue;
}
// Otherwise, strides don't match; dimension `i` is not collapsible.
--collapsedIndex;
assert(collapsedIndex >= 0);
info.sizes[collapsedIndex] = sizeOuter;
info.strides[collapsedIndex] = strideOuter;
}
// We must have filled all the dimensions we're looking for
assert(collapsedIndex == 0);
// Fill out the remainder dims for sanity.
for (int i = dims - numCollapsed; i < MAX_DIMS; ++i) {
info.sizes[i] = 1;
info.strides[i] = info.strides[dims - numCollapsed - 1] *
info.sizes[dims - numCollapsed - 1];
}
return info;
}
// Returns true if all linear ID -> offset math can be performed using 32 bit
// unsigned math
bool
canUse32BitCopyMath(THCState *state, THCudaTensor* t) {
long elements = THCudaTensor_nElement(state, t);
if (elements >= UINT_MAX) {
return false;
}
long offset = 0;
long linearId = elements - 1;
for (int i = THCudaTensor_nDimension(state, t) - 1; i >= 0; --i) {
long curDimIndex = linearId % THCudaTensor_size(state, t, i);
long curDimOffset = curDimIndex * THCudaTensor_stride(state, t, i);
offset += curDimOffset;
linearId /= THCudaTensor_size(state, t, i);
}
if (offset >= UINT_MAX) {
return false;
}
return true;
}
// Translate a linear ID for the copy to a float offset
template <typename IndexType, int Dims>
__forceinline__ __device__ IndexType
linearIdToOffset(IndexType linearId, const TensorInfo<IndexType>& info) {
IndexType offset = 0;
if (Dims == -1) {
// Use dynamic dims
for (int i = info.dims - 1; i >= 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
} else {
// Use static dims
for (int i = Dims - 1; i >= 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
if (i > 0) {
linearId /= info.sizes[i];
}
}
}
return offset;
}
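// Example: for a 2-D view with sizes {3, 4} and strides {8, 2} (e.g. every
// other column of a wider contiguous tensor), linearId = 7 decomposes as
// column 7 % 4 = 3 and row 7 / 4 = 1, giving offset = 3*2 + 1*8 = 14
// elements from the base pointer.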
// Both `src` and `dst` have the same number of total elements, which are copied
// based on a linear id.
template <typename IndexType, int DstDims, int SrcDims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 4, 16)
#endif
__global__ void
THCudaTensor_kernel_copy(TensorInfo<IndexType> dst,
TensorInfo<IndexType> src,
IndexType totalElements) {
const IndexType linearBlockId =
blockIdx.z * gridDim.y * gridDim.x +
blockIdx.y * gridDim.x +
blockIdx.x;
const IndexType startLinearId =
linearBlockId * THREADS_PER_BLOCK * ELEMENTS_PER_THREAD;
IndexType endLinearId =
(linearBlockId + 1) * THREADS_PER_BLOCK * ELEMENTS_PER_THREAD;
endLinearId = endLinearId < totalElements ? endLinearId : totalElements;
for (IndexType linearId = startLinearId + threadIdx.x;
linearId < endLinearId;
linearId += THREADS_PER_BLOCK) {
// Convert `linearId` into an offset of `src`
const IndexType srcOffset =
linearIdToOffset<IndexType, SrcDims>(linearId, src);
// Convert `linearId` into an offset of `dst`
const IndexType dstOffset =
linearIdToOffset<IndexType, DstDims>(linearId, dst);
dst.data[dstOffset] = LDG(&src.data[srcOffset]);
}
}
THC_API void
THCudaTensor_copy(THCState *state, THCudaTensor* dst, THCudaTensor* src) {
long totalElements = THCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THCudaTensor_nElement(state, src), 2,
"sizes do not match");
THArgCheck(THCudaTensor_nDimension(state, dst) <= MAX_DIMS, 2,
"Copy only supported for <= 25 dimensions");
THArgCheck(THCudaTensor_nDimension(state, src) <= MAX_DIMS, 3,
"Copy only supported for <= 25 dimensions");
if (THCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool memcpyEligible =
(THCudaTensor_isContiguous(state, dst) && THCudaTensor_isContiguous(state, src)) ||
(totalElements == 1);
if (memcpyEligible) {
THCudaCheck(hipMemcpyAsync(THCudaTensor_data(state, dst),
THCudaTensor_data(state, src),
totalElements * sizeof(float),
hipMemcpyDeviceToDevice));
} else {
// We always work with a THREADS_PER_BLOCK-sized thread block,
// and assume a max sized grid dimension of MAX_GRID_SIZE.
// Each thread will process up to ELEMENTS_PER_THREAD elements.
const dim3 block(THREADS_PER_BLOCK);
long gridTiles = DIVUP(totalElements, block.x * ELEMENTS_PER_THREAD);
THArgCheck(gridTiles <= MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE, 2,
"tensor too large");
long gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
long gridY = 1;
long gridZ = 1;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = DIVUP(gridTiles, MAX_GRID_SIZE);
gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = DIVUP(gridTiles, MAX_GRID_SIZE);
gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
}
}
dim3 grid(gridX, gridY, gridZ);
// It is possible that the tensor dimensions are able to be collapsed,
// and thus we can reduce the actual code complexity of the copy by
// exploiting this knowledge statically, since the div/mod is the
// most expensive part of the operation, more so than memory accesses.
// For instance, when copying a non-contiguous to a contiguous tensor
// (or vice versa), the contiguous tensor can be collapsed to one
// dimension, and the loop to translate the linear index to the array
// index can be similarly collapsed. That is what this unrolling is for.
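// For instance, copying into a contiguous destination (which collapses to
// 1-D) from a 3-D strided source instantiates
// THCudaTensor_kernel_copy<unsigned int, 1, 3>: the destination offset
// needs a single modulo and multiply, while the source side unrolls three
// such rounds; the <-1, -1> instantiation keeps a runtime loop for
// higher-rank cases.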
#define HANDLE_CASE(TYPE, DST, SRC) \
hipLaunchKernelGGL(( THCudaTensor_kernel_copy<TYPE, DST, SRC>) \
, dim3(grid), dim3(block), 0, 0, dstInfo, srcInfo, (TYPE) totalElements);
#define HANDLE_SRC_CASE(TYPE, DST, SRC) \
{ \
switch (SRC) { \
case 1: \
HANDLE_CASE(TYPE, DST, 1); \
break; \
case 2: \
HANDLE_CASE(TYPE, DST, 2); \
break; \
case 3: \
HANDLE_CASE(TYPE, DST, 3); \
break; \
case 4: \
HANDLE_CASE(TYPE, DST, 4); \
break; \
case 5: \
HANDLE_CASE(TYPE, DST, 5); \
break; \
default: \
HANDLE_CASE(TYPE, -1, -1); \
break; \
} \
}
#define HANDLE_DST_CASE(TYPE, DST, SRC) \
case DST: \
HANDLE_SRC_CASE(TYPE, DST, SRC); \
break;
// Can we use 32-bit integer math in the kernel (the linear ID for the copy
// and the resulting non-linear offset is all computable using 32-bit math?)
// We also use unsigned index math in the kernel, as signed div/mod has
// additional overhead.
if (canUse32BitCopyMath(state, src) && canUse32BitCopyMath(state, dst)) {
TensorInfo<unsigned int> dstInfo =
THCudaTensor_computeTensorInfo<unsigned int>(state, dst);
TensorInfo<unsigned int> srcInfo =
THCudaTensor_computeTensorInfo<unsigned int>(state, src);
switch (dstInfo.dims) {
HANDLE_DST_CASE(unsigned int, 1, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 2, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 3, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 4, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 5, srcInfo.dims);
default:
HANDLE_DST_CASE(unsigned int, -1, srcInfo.dims);
}
} else {
TensorInfo<unsigned long> dstInfo =
THCudaTensor_computeTensorInfo<unsigned long>(state, dst);
TensorInfo<unsigned long> srcInfo =
THCudaTensor_computeTensorInfo<unsigned long>(state, src);
switch (dstInfo.dims) {
HANDLE_DST_CASE(unsigned long, 1, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 2, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 3, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 4, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 5, srcInfo.dims);
default:
HANDLE_DST_CASE(unsigned long, -1, srcInfo.dims);
}
}
#undef HANDLE_CASE
#undef HANDLE_SRC_CASE
#undef HANDLE_DST_CASE
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
#undef DIVUP
#undef LDG
#undef ELEMENTS_PER_THREAD
#undef THREADS_PER_BLOCK
#undef MAX_GRID_SIZE
#undef MAX_DIMS
| e426f80244b00b17005318da6bd37ee9912eceb5.cu | #include "THGeneral.h"
#include "THCGeneral.h"
#include "THCTensor.h"
#include <assert.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
// backward-compatible LDG
#if __CUDA_ARCH__ >= 350
#define LDG(x) (__ldg(x))
#else
#define LDG(x) (*(x))
#endif
// Maximum elements per thread that we will copy
#define ELEMENTS_PER_THREAD 8L
// Threads per thread block
#define THREADS_PER_BLOCK 32 * 4
// Maximum size per grid dimension that we assume (compute capability >= 2.0)
#define MAX_GRID_SIZE 65535L
// Maximum number of dimensions allowed for cutorch
#define MAX_DIMS 25
template <typename IndexType>
struct TensorInfo {
float* data;
IndexType sizes[MAX_DIMS];
IndexType strides[MAX_DIMS];
int dims;
};
// This function extracts size/stride information for the kernel.
// Successive dimensions can be collapsed if the size/strides match
// up and thus there are no holes between the dimensions. This is used
// to reduce the complexity of the problem.
template <typename IndexType>
TensorInfo<IndexType>
THCudaTensor_computeTensorInfo(THCState *state, THCudaTensor* t) {
int dims = THCudaTensor_nDimension(state, t);
assert(dims <= MAX_DIMS);
TensorInfo<IndexType> info;
info.data = THCudaTensor_data(state, t);
// Count the number of successive dimensions that can be collapsed, from
// innermost to outermost.
int numCollapsed = 0;
// Find the innermost dimension not of size 1, since dimensions of size 1 are
// collapsible.
int firstNonOneDim = -1;
for (int i = dims - 1; i >= 0; --i) {
if (THCudaTensor_size(state, t, i) != 1) {
firstNonOneDim = i;
break;
}
}
// We guarantee that we are never called with only dimensions of size 1.
assert(firstNonOneDim >= 0);
// Skip the leading size 1 dims
numCollapsed += dims - 1 - firstNonOneDim;
// Now, to determine the other collapsible dims. These are the size/strides
// of the previous inner non-collapsible dim we encounter.
long sizeInner = THCudaTensor_size(state, t, firstNonOneDim);
long strideInner = THCudaTensor_stride(state, t, firstNonOneDim);
for (int i = firstNonOneDim - 1; i >= 0; --i) {
long sizeOuter = THCudaTensor_size(state, t, i);
long strideOuter = THCudaTensor_stride(state, t, i);
// The next outermost dimension can be skipped if size 1
if (sizeOuter == 1) {
++numCollapsed;
continue;
}
// If the next outermost dimension is contiguous with the
// previous non-collapsed one, collapse it
if (strideOuter == strideInner * sizeInner) {
++numCollapsed;
// This is the run of collapsed dimensions' size
sizeInner = sizeInner * sizeOuter;
continue;
}
// Otherwise, this new outer dimension at `i` cannot be collapsed
// and is different from the previous.
sizeInner = sizeOuter;
strideInner = strideOuter;
}
assert(numCollapsed < dims);
info.dims = dims - numCollapsed;
// Determine the sizes of the collapsed dimensions.
int collapsedIndex = dims - numCollapsed - 1;
info.sizes[collapsedIndex] = THCudaTensor_size(state, t, firstNonOneDim);
info.strides[collapsedIndex] = THCudaTensor_stride(state, t, firstNonOneDim);
for (int i = firstNonOneDim - 1; i >= 0; --i) {
long sizeOuter = THCudaTensor_size(state, t, i);
long strideOuter = THCudaTensor_stride(state, t, i);
if (sizeOuter == 1) {
// skip
continue;
}
if (strideOuter ==
info.sizes[collapsedIndex] * info.strides[collapsedIndex]) {
// collapse
info.sizes[collapsedIndex] *= sizeOuter;
continue;
}
// Otherwise, strides don't match; dimension `i` is not collapsible.
--collapsedIndex;
assert(collapsedIndex >= 0);
info.sizes[collapsedIndex] = sizeOuter;
info.strides[collapsedIndex] = strideOuter;
}
// We must have filled all the dimensions we're looking for
assert(collapsedIndex == 0);
// Fill out the remainder dims for sanity.
for (int i = dims - numCollapsed; i < MAX_DIMS; ++i) {
info.sizes[i] = 1;
info.strides[i] = info.strides[dims - numCollapsed - 1] *
info.sizes[dims - numCollapsed - 1];
}
return info;
}
// Returns true if all linear ID -> offset math can be performed using 32 bit
// unsigned math
bool
canUse32BitCopyMath(THCState *state, THCudaTensor* t) {
long elements = THCudaTensor_nElement(state, t);
if (elements >= UINT_MAX) {
return false;
}
long offset = 0;
long linearId = elements - 1;
for (int i = THCudaTensor_nDimension(state, t) - 1; i >= 0; --i) {
long curDimIndex = linearId % THCudaTensor_size(state, t, i);
long curDimOffset = curDimIndex * THCudaTensor_stride(state, t, i);
offset += curDimOffset;
linearId /= THCudaTensor_size(state, t, i);
}
if (offset >= UINT_MAX) {
return false;
}
return true;
}
// Translate a linear ID for the copy to a float offset
template <typename IndexType, int Dims>
__forceinline__ __device__ IndexType
linearIdToOffset(IndexType linearId, const TensorInfo<IndexType>& info) {
IndexType offset = 0;
if (Dims == -1) {
// Use dynamic dims
for (int i = info.dims - 1; i >= 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
} else {
// Use static dims
for (int i = Dims - 1; i >= 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
if (i > 0) {
linearId /= info.sizes[i];
}
}
}
return offset;
}
// Both `src` and `dst` have the same number of total elements, which are copied
// based on a linear id.
template <typename IndexType, int DstDims, int SrcDims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 4, 16)
#endif
__global__ void
THCudaTensor_kernel_copy(TensorInfo<IndexType> dst,
TensorInfo<IndexType> src,
IndexType totalElements) {
const IndexType linearBlockId =
blockIdx.z * gridDim.y * gridDim.x +
blockIdx.y * gridDim.x +
blockIdx.x;
const IndexType startLinearId =
linearBlockId * THREADS_PER_BLOCK * ELEMENTS_PER_THREAD;
IndexType endLinearId =
(linearBlockId + 1) * THREADS_PER_BLOCK * ELEMENTS_PER_THREAD;
endLinearId = endLinearId < totalElements ? endLinearId : totalElements;
for (IndexType linearId = startLinearId + threadIdx.x;
linearId < endLinearId;
linearId += THREADS_PER_BLOCK) {
// Convert `linearId` into an offset of `src`
const IndexType srcOffset =
linearIdToOffset<IndexType, SrcDims>(linearId, src);
// Convert `linearId` into an offset of `dst`
const IndexType dstOffset =
linearIdToOffset<IndexType, DstDims>(linearId, dst);
dst.data[dstOffset] = LDG(&src.data[srcOffset]);
}
}
THC_API void
THCudaTensor_copy(THCState *state, THCudaTensor* dst, THCudaTensor* src) {
long totalElements = THCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THCudaTensor_nElement(state, src), 2,
"sizes do not match");
THArgCheck(THCudaTensor_nDimension(state, dst) <= MAX_DIMS, 2,
"Copy only supported for <= 25 dimensions");
THArgCheck(THCudaTensor_nDimension(state, src) <= MAX_DIMS, 3,
"Copy only supported for <= 25 dimensions");
if (THCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool memcpyEligible =
(THCudaTensor_isContiguous(state, dst) && THCudaTensor_isContiguous(state, src)) ||
(totalElements == 1);
if (memcpyEligible) {
THCudaCheck(cudaMemcpyAsync(THCudaTensor_data(state, dst),
THCudaTensor_data(state, src),
totalElements * sizeof(float),
cudaMemcpyDeviceToDevice));
} else {
// We always work with a THREADS_PER_BLOCK-sized thread block,
// and assume a max sized grid dimension of MAX_GRID_SIZE.
// Each thread will process up to ELEMENTS_PER_THREAD elements.
const dim3 block(THREADS_PER_BLOCK);
long gridTiles = DIVUP(totalElements, block.x * ELEMENTS_PER_THREAD);
THArgCheck(gridTiles <= MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE, 2,
"tensor too large");
long gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
long gridY = 1;
long gridZ = 1;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = DIVUP(gridTiles, MAX_GRID_SIZE);
gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = DIVUP(gridTiles, MAX_GRID_SIZE);
gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
}
}
dim3 grid(gridX, gridY, gridZ);
// It is possible that the tensor dimensions are able to be collapsed,
// and thus we can reduce the actual code complexity of the copy by
// exploiting this knowledge statically, since the div/mod is the
// most expensive part of the operation, more so than memory accesses.
// For instance, when copying a non-contiguous to a contiguous tensor
// (or vice versa), the contiguous tensor can be collapsed to one
// dimension, and the loop to translate the linear index to the array
// index can be similarly collapsed. That is what this unrolling is for.
#define HANDLE_CASE(TYPE, DST, SRC) \
THCudaTensor_kernel_copy<TYPE, DST, SRC> \
<<<grid, block>>>(dstInfo, srcInfo, (TYPE) totalElements);
#define HANDLE_SRC_CASE(TYPE, DST, SRC) \
{ \
switch (SRC) { \
case 1: \
HANDLE_CASE(TYPE, DST, 1); \
break; \
case 2: \
HANDLE_CASE(TYPE, DST, 2); \
break; \
case 3: \
HANDLE_CASE(TYPE, DST, 3); \
break; \
case 4: \
HANDLE_CASE(TYPE, DST, 4); \
break; \
case 5: \
HANDLE_CASE(TYPE, DST, 5); \
break; \
default: \
HANDLE_CASE(TYPE, -1, -1); \
break; \
} \
}
#define HANDLE_DST_CASE(TYPE, DST, SRC) \
case DST: \
HANDLE_SRC_CASE(TYPE, DST, SRC); \
break;
// Can we use 32-bit integer math in the kernel (the linear ID for the copy
// and the resulting non-linear offset is all computable using 32-bit math?)
// We also use unsigned index math in the kernel, as signed div/mod has
// additional overhead.
if (canUse32BitCopyMath(state, src) && canUse32BitCopyMath(state, dst)) {
TensorInfo<unsigned int> dstInfo =
THCudaTensor_computeTensorInfo<unsigned int>(state, dst);
TensorInfo<unsigned int> srcInfo =
THCudaTensor_computeTensorInfo<unsigned int>(state, src);
switch (dstInfo.dims) {
HANDLE_DST_CASE(unsigned int, 1, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 2, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 3, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 4, srcInfo.dims);
HANDLE_DST_CASE(unsigned int, 5, srcInfo.dims);
default:
HANDLE_DST_CASE(unsigned int, -1, srcInfo.dims);
}
} else {
TensorInfo<unsigned long> dstInfo =
THCudaTensor_computeTensorInfo<unsigned long>(state, dst);
TensorInfo<unsigned long> srcInfo =
THCudaTensor_computeTensorInfo<unsigned long>(state, src);
switch (dstInfo.dims) {
HANDLE_DST_CASE(unsigned long, 1, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 2, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 3, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 4, srcInfo.dims);
HANDLE_DST_CASE(unsigned long, 5, srcInfo.dims);
default:
HANDLE_DST_CASE(unsigned long, -1, srcInfo.dims);
}
}
#undef HANDLE_CASE
#undef HANDLE_SRC_CASE
#undef HANDLE_DST_CASE
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
#undef DIVUP
#undef LDG
#undef ELEMENTS_PER_THREAD
#undef THREADS_PER_BLOCK
#undef MAX_GRID_SIZE
#undef MAX_DIMS
|
4e89f2232c34c1e6564ded28642531d1170772c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 5
#define ITERATIONS 50000000
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1=1;
float Value2=A[i];
float Value3=B[i];
float Value;
float I1=A[i];
float I2=B[i];
// Excessive multiplication access: a long serially dependent chain of FP multiplies
// if(i%32==0){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
// }
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
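// Note: the loop above is a serially dependent chain of FP multiplies (each
// iteration consumes the previous iteration's results), so the compiler
// cannot drop or reorder it; with 6 multiplies per iteration this is roughly
// 6 * ITERATIONS = 3e8 dependent FLOPs per thread, and the final store to C
// keeps the values live.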
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // float division keeps values in [0, 1]
}
}
| 4e89f2232c34c1e6564ded28642531d1170772c6.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 5
#define ITERATIONS 50000000
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1=1;
float Value2=A[i];
float Value3=B[i];
float Value;
float I1=A[i];
float I2=B[i];
// Excessive multiplication access: a long serially dependent chain of FP multiplies
// if(i%32==0){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
// }
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
}
|
ed8caa523aa92c86d09e377ab97f293a6de436e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include "common_hip.cuh"
template <typename scalar_t>
__global__ void
grid_kernel(int64_t *cluster, at::cuda::detail::TensorInfo<scalar_t, int> pos,
scalar_t *__restrict__ size, scalar_t *__restrict__ start,
scalar_t *__restrict__ end, size_t num_nodes) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
for (ptrdiff_t i = index; i < num_nodes; i += stride) {
int64_t c = 0, k = 1;
scalar_t tmp;
for (ptrdiff_t d = 0; d < pos.sizes[1]; d++) {
tmp = pos.data[i * pos.strides[0] + d * pos.strides[1]] - start[d];
c += (int64_t)(tmp / size[d]) * k;
k += (int64_t)((end[d] - start[d]) / size[d]);
}
cluster[i] = c;
}
}
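// Example: assuming 2-D positions with start = (0, 0), end = (4, 4) and
// size = (1, 1), a point at (2.5, 3.2) gets c = 2*1 = 2 from the first
// dimension, k grows to 1 + 4 = 5, and the second dimension adds 3*5 = 15,
// so the point lands in cluster cell 17.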
at::Tensor grid(at::Tensor pos, at::Tensor size, at::Tensor start,
at::Tensor end) {
auto cluster = at::empty(pos.type().toScalarType(at::kLong), {pos.size(0)});
AT_DISPATCH_ALL_TYPES(pos.type(), "grid_kernel", [&] {
hipLaunchKernelGGL(( grid_kernel<scalar_t>), dim3(BLOCKS(pos.size(0))), dim3(THREADS), 0, 0,
cluster.data<int64_t>(),
at::cuda::detail::getTensorInfo<scalar_t, int>(pos),
size.toType(pos.type()).data<scalar_t>(),
start.toType(pos.type()).data<scalar_t>(),
end.toType(pos.type()).data<scalar_t>(), pos.size(0));
});
return cluster;
}
| ed8caa523aa92c86d09e377ab97f293a6de436e3.cu | #include <ATen/ATen.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include "common.cuh"
template <typename scalar_t>
__global__ void
grid_kernel(int64_t *cluster, at::cuda::detail::TensorInfo<scalar_t, int> pos,
scalar_t *__restrict__ size, scalar_t *__restrict__ start,
scalar_t *__restrict__ end, size_t num_nodes) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
for (ptrdiff_t i = index; i < num_nodes; i += stride) {
int64_t c = 0, k = 1;
scalar_t tmp;
for (ptrdiff_t d = 0; d < pos.sizes[1]; d++) {
tmp = pos.data[i * pos.strides[0] + d * pos.strides[1]] - start[d];
c += (int64_t)(tmp / size[d]) * k;
k += (int64_t)((end[d] - start[d]) / size[d]);
}
cluster[i] = c;
}
}
at::Tensor grid(at::Tensor pos, at::Tensor size, at::Tensor start,
at::Tensor end) {
auto cluster = at::empty(pos.type().toScalarType(at::kLong), {pos.size(0)});
AT_DISPATCH_ALL_TYPES(pos.type(), "grid_kernel", [&] {
grid_kernel<scalar_t><<<BLOCKS(pos.size(0)), THREADS>>>(
cluster.data<int64_t>(),
at::cuda::detail::getTensorInfo<scalar_t, int>(pos),
size.toType(pos.type()).data<scalar_t>(),
start.toType(pos.type()).data<scalar_t>(),
end.toType(pos.type()).data<scalar_t>(), pos.size(0));
});
return cluster;
}
|
f6cbd05e76dfddbd52d931d36a36dcd9c78def1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Hello world for CUDA, with access to the shared memory of the multiprocessors
*/
#include <stdio.h>
#include <stdlib.h>
__shared__ float sums[10];
// Define a kernel function
__global__ void vector_sum(float* A, float* B, int length, const int N) {
// Take a vector A of length "length" and sum it, putting the result in
// the vector B
// Declare some shared memory to store the sums.
// We need enough floats for each thread to have one
//__shared__ float sums[N];
// Our ID is unique to our thread, so use it as our index
// Initialise our sum
sums[threadIdx.x] = 0;
// Calculate the sum
for (unsigned int i = 0; i < length; i++) {
sums[threadIdx.x] += A[i];
}
B[threadIdx.x] = sums[threadIdx.x];
}
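// A dynamically sized variant of the fixed sums[10] above (a sketch only,
// not called from main below): the shared array is declared extern and its
// byte size is passed as the sharedMemBytes launch argument, e.g.
// hipLaunchKernelGGL(vector_sum_dyn, dim3(1), dim3(N), N * sizeof(float), 0,
// device_A, device_B, length);
__global__ void vector_sum_dyn(float* A, float* B, int length) {
    extern __shared__ float dyn_sums[]; // sized at launch time
    dyn_sums[threadIdx.x] = 0;
    for (int i = 0; i < length; i++) {
        dyn_sums[threadIdx.x] += A[i];
    }
    B[threadIdx.x] = dyn_sums[threadIdx.x];
}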
int main() {
// This is the size of our output vector, and the number of threads
const int N = 10;
// This will be the length of our input vectors
int length = 50;
// These will be our vectors on the host
float* host_A; // This contains all input vectors
float* host_B;
// Use this for indices
int i;
// Define our vectors on the host
host_A = (float*) malloc(N*length*sizeof(float));
host_B = (float*) malloc(N*sizeof(float));
// Initialise them
for (i = 0; i < N*length; i++) {
host_A[i] = (float)(i%length);
//host_B[i] = 0.0;
}
// Define our vectors on the GPU
float* device_A;
float* device_B;
hipMalloc((void**) &device_A, sizeof(float)*N*length);
hipMalloc((void**) &device_B, sizeof(float)*N);
// Transfer data to the GPU
hipMemcpy(device_A, host_A, sizeof(float)*N*length,
hipMemcpyHostToDevice);
//hipMemcpy(device_B, host_B, sizeof(float)*N, hipMemcpyHostToDevice);
// Call our function; the first launch parameter is the number of thread
// blocks in the grid, the second is the number of threads per block
hipLaunchKernelGGL(( vector_sum), dim3(1), dim3(N), 0, 0, device_A, device_B, length, N);
// Copy memory back
hipMemcpy(host_B, device_B, sizeof(float)*N, hipMemcpyDeviceToHost);
// Free device memory
hipFree(device_A);
hipFree(device_B);
// Output our results
printf("A = [");
for (i = 0; i < N*length; i++) {
if (i%length == 0) {
printf("\n");
}
printf("%G,", host_A[i]);
}
printf("]\n");
printf("B = [");
for (i = 0; i < N; i++) {
printf("%G,", host_B[i]);
}
printf("]\n");
return 0;
}
| f6cbd05e76dfddbd52d931d36a36dcd9c78def1d.cu | /*
* Hello world for CUDA, with access to the shared memory of the multiprocessors
*/
#include <stdio.h>
#include <stdlib.h>
__shared__ float sums[10];
// Define a kernel function
__global__ void vector_sum(float* A, float* B, int length, const int N) {
// Take a vector A of length "length" and sum it, putting the result in
// the vector B
// Declare some shared memory to store the sums.
// We need enough floats for each thread to have one
//__shared__ float sums[N];
// Our ID is unique to our thread, so use it as our index
// Initialise our sum
sums[threadIdx.x] = 0;
// Calculate the sum
for (unsigned int i = 0; i < length; i++) {
sums[threadIdx.x] += A[i];
}
B[threadIdx.x] = sums[threadIdx.x];
}
int main() {
// This is the size of our output vector, and the number of threads
const int N = 10;
// This will be the length of our input vectors
int length = 50;
// These will be our vectors on the host
float* host_A; // This contains all input vectors
float* host_B;
// Use this for indices
int i;
// Define our vectors on the host
host_A = (float*) malloc(N*length*sizeof(float));
host_B = (float*) malloc(N*sizeof(float));
// Initialise them
for (i = 0; i < N*length; i++) {
host_A[i] = (float)(i%length);
//host_B[i] = 0.0;
}
// Define our vectors on the GPU
float* device_A;
float* device_B;
cudaMalloc((void**) &device_A, sizeof(float)*N*length);
cudaMalloc((void**) &device_B, sizeof(float)*N);
// Transfer data to the GPU
cudaMemcpy(device_A, host_A, sizeof(float)*N*length,
cudaMemcpyHostToDevice);
//cudaMemcpy(device_B, host_B, sizeof(float)*N, cudaMemcpyHostToDevice);
// Call our function; the first launch parameter is the number of thread
// blocks in the grid, the second is the number of threads per block
vector_sum<<<1, N>>>(device_A, device_B, length, N);
// Copy memory back
cudaMemcpy(host_B, device_B, sizeof(float)*N, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(device_A);
cudaFree(device_B);
// Output our results
printf("A = [");
for (i = 0; i < N*length; i++) {
if (i%length == 0) {
printf("\n");
}
printf("%G,", host_A[i]);
}
printf("]\n");
printf("B = [");
for (i = 0; i < N; i++) {
printf("%G,", host_B[i]);
}
printf("]\n");
return 0;
}
|
b4b380f623fcbbb8e8c04b49f07dfc844976b0ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cast_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
template <typename DstType, typename SrcType>
__global__ void CastKernel(const int N, const SrcType* X, DstType* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
// Y[i] = static_cast<DstType>(X[i]);
Y[i] = convert::To<SrcType, DstType>(X[i]);
}
}
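// CUDA_1D_KERNEL_LOOP is caffe2's grid-stride loop; it expands to roughly
//   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
//        i += blockDim.x * gridDim.x)
// so a fixed-size launch still covers every element of an arbitrarily
// large N.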
template <>
template <typename DstType, typename SrcType>
bool CastOp<CUDAContext>::DoRunWithType() {
auto& input = Input(0);
auto* output = Output(0, input.sizes(), at::dtype<DstType>());
const auto* data = input.template data<SrcType>();
auto* out = output->template mutable_data<DstType>();
DCHECK(input.numel() < INT_MAX);
int N = input.numel();
if (N == 0) {
// skip the rest of the computation if input is empty
return true;
}
hipLaunchKernelGGL(( CastKernel<DstType, SrcType>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, data, out);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename DstType>
bool CastOp<CUDAContext>::DoRunWithDstType() {
return DispatchHelper<
TensorTypes<
float,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
DstType>::call(this, Input(0));
}
// specific version that allows for casting to fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<float>() {
return DispatchHelper<
TensorTypes<
float,
at::Half,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
float /* DstType */>::call(this, Input(0));
}
// specific version for casting _from_ fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<at::Half>() {
return DispatchHelper<
TensorTypes<
float,
at::Half>,
at::Half /* DstType */>::call(this, Input(0));
}
template <>
void CastOp<CUDAContext>::SetBody(TensorProto_DataType to) {
switch (to) {
case TensorProto_DataType_FLOAT:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<float>;
break;
case TensorProto_DataType_INT32:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int>;
break;
case TensorProto_DataType_BYTE:
LOG(FATAL) << "BYTE is deprecated";
break;
case TensorProto_DataType_STRING:
CAFFE_THROW("Casting to and from strings is not supported yet");
// break;
case TensorProto_DataType_BOOL:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<bool>;
break;
case TensorProto_DataType_UINT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint8_t>;
break;
case TensorProto_DataType_INT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int8_t>;
break;
case TensorProto_DataType_UINT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint16_t>;
break;
case TensorProto_DataType_INT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int16_t>;
break;
case TensorProto_DataType_INT64:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int64_t>;
break;
case TensorProto_DataType_FLOAT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<at::Half>;
break;
case TensorProto_DataType_DOUBLE:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<double>;
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("Cast op must have 'to' argument of type DataType");
// break;
default:
CAFFE_THROW("Unexpected 'to' argument value: ", to);
}
}
REGISTER_CUDA_OPERATOR(Cast, CastOp<CUDAContext>);
} // namespace caffe2
| b4b380f623fcbbb8e8c04b49f07dfc844976b0ac.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cast_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
template <typename DstType, typename SrcType>
__global__ void CastKernel(const int N, const SrcType* X, DstType* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
// Y[i] = static_cast<DstType>(X[i]);
Y[i] = convert::To<SrcType, DstType>(X[i]);
}
}
template <>
template <typename DstType, typename SrcType>
bool CastOp<CUDAContext>::DoRunWithType() {
auto& input = Input(0);
auto* output = Output(0, input.sizes(), at::dtype<DstType>());
const auto* data = input.template data<SrcType>();
auto* out = output->template mutable_data<DstType>();
DCHECK(input.numel() < INT_MAX);
int N = input.numel();
if (N == 0) {
// skip the rest of the computation if input is empty
return true;
}
CastKernel<DstType, SrcType>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, data, out);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename DstType>
bool CastOp<CUDAContext>::DoRunWithDstType() {
return DispatchHelper<
TensorTypes<
float,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
DstType>::call(this, Input(0));
}
// specific version that allows for casting to fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<float>() {
return DispatchHelper<
TensorTypes<
float,
at::Half,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
float /* DstType */>::call(this, Input(0));
}
// specific version for casting _from_ fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<at::Half>() {
return DispatchHelper<
TensorTypes<
float,
at::Half>,
at::Half /* DstType */>::call(this, Input(0));
}
template <>
void CastOp<CUDAContext>::SetBody(TensorProto_DataType to) {
switch (to) {
case TensorProto_DataType_FLOAT:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<float>;
break;
case TensorProto_DataType_INT32:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int>;
break;
case TensorProto_DataType_BYTE:
LOG(FATAL) << "BYTE is deprecated";
break;
case TensorProto_DataType_STRING:
CAFFE_THROW("Casting to and from strings is not supported yet");
// break;
case TensorProto_DataType_BOOL:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<bool>;
break;
case TensorProto_DataType_UINT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint8_t>;
break;
case TensorProto_DataType_INT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int8_t>;
break;
case TensorProto_DataType_UINT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint16_t>;
break;
case TensorProto_DataType_INT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int16_t>;
break;
case TensorProto_DataType_INT64:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int64_t>;
break;
case TensorProto_DataType_FLOAT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<at::Half>;
break;
case TensorProto_DataType_DOUBLE:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<double>;
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("Cast op must have 'to' argument of type DataType");
// break;
default:
CAFFE_THROW("Unexpected 'to' argument value: ", to);
}
}
REGISTER_CUDA_OPERATOR(Cast, CastOp<CUDAContext>);
} // namespace caffe2
|
fe5d6805223fdfb7006c29fb71d1289bcad24c37.hip | // !!! This is a file automatically generated by hipify!!!
// This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2013-2015 NVIDIA Corporation. All rights reserved.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <vector>
#include <limits>
#include <algorithm>
#include "../core/maths.h"
#include "../core/voxelize.h"
#include "../core/sdf.h"
#include "../include/flex.h"
#include "../demo/cloth.h"
#include "flexExt.h"
#define cudaCheck(x) { hipError_t err = x; if (err != hipSuccess) { printf("Cuda error: %d in %s at %s:%d\n", err, #x, __FILE__, __LINE__); assert(0); } }
class Bitmap
{
public:
typedef unsigned int Word;
static const int kWordSize = sizeof(Word)*8;
Bitmap(int numBits) : mBits((numBits+kWordSize-1)/kWordSize)
{
}
inline void Set(int bit)
{
const int wordIndex = bit/kWordSize;
const int bitIndex = bit&(kWordSize-1);
const Word word = mBits[wordIndex];
mBits[wordIndex] = word|(1<<bitIndex);
}
inline void Reset(int bit)
{
const int wordIndex = bit/kWordSize;
const int bitIndex = bit&(kWordSize-1);
const Word word = mBits[wordIndex];
mBits[wordIndex] = word&~(1<<bitIndex);
}
inline bool IsSet(int bit)
{
const int wordIndex = bit/kWordSize;
const int bitIndex = bit&(kWordSize-1);
const Word word = mBits[wordIndex];
return (word & (1<<bitIndex)) != 0;
}
private:
std::vector<Word> mBits;
};
// std::allocator compatible allocator for containers using pinned host memory (via flexAlloc)
template <class T>
struct PinnedAllocator
{
// typedefs
typedef T value_type;
typedef value_type* pointer;
typedef const value_type* const_pointer;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
inline explicit PinnedAllocator() {}
inline ~PinnedAllocator() {}
inline explicit PinnedAllocator(PinnedAllocator const&) {}
template<typename U>
inline explicit PinnedAllocator(PinnedAllocator<U> const&) {}
inline size_type max_size() const
{
return std::numeric_limits<size_type>::max();
}
T* allocate(std::size_t n)
{
return (T*)flexAlloc(int(n*sizeof(T)));
}
void deallocate(T* p, std::size_t n)
{
flexFree(p);
}
public :
// convert an allocator<T> to allocator<U>
template<typename U>
struct rebind {
typedef PinnedAllocator<U> other;
};
// construction/destruction
inline void construct(pointer p, const T& t) { new(p) T(t); }
inline void destroy(pointer p) { p->~T(); }
};
template <class T, class U>
bool operator==(const PinnedAllocator<T>&, const PinnedAllocator<U>&);
template <class T, class U>
bool operator!=(const PinnedAllocator<T>&, const PinnedAllocator<U>&);
struct FlexExtContainer
{
int mMaxParticles;
FlexSolver* mSolver;
// first n indices
std::vector<int, PinnedAllocator<int> > mActiveList;
std::vector<int> mFreeList;
std::vector<FlexExtInstance*> mInstances;
// particles
std::vector<Vec4, PinnedAllocator<Vec4> > mParticles;
std::vector<Vec3, PinnedAllocator<Vec3> > mVelocities;
std::vector<int, PinnedAllocator<int> > mPhases;
std::vector<Vec4, PinnedAllocator<Vec4> > mNormals;
// rigids
std::vector<int> mRigidOffsets;
std::vector<int> mRigidIndices;
std::vector<float> mRigidCoefficients;
std::vector<Matrix33, PinnedAllocator<Vec3> > mRigidRotations;
std::vector<Vec3, PinnedAllocator<Vec3> > mRigidTranslations;
std::vector<Vec3, PinnedAllocator<Vec3> > mRigidLocalPositions;
// springs
std::vector<int, PinnedAllocator<int> > mSpringIndices;
std::vector<float, PinnedAllocator<float> > mSpringLengths;
std::vector<float, PinnedAllocator<float> > mSpringCoefficients;
// cloth
std::vector<int, PinnedAllocator<int> > mTriangleIndices;
std::vector<Vec3, PinnedAllocator<Vec3> > mTriangleNormals;
std::vector<int> mInflatableStarts;
std::vector<int> mInflatableCounts;
std::vector<float> mInflatableRestVolumes;
std::vector<float> mInflatableCoefficients;
std::vector<float> mInflatableOverPressures;
//force fields
FlexExtForceField* mForceFieldsGpu;
int mMaxForceFields;
int mNumForceFields;
int* mTmpActiveIndicesGpu;
Vec4* mTmpParticlesGpu;
Vec3* mTmpVelocitiesGpu;
int mMaxTmpParticles;
// needs compact
bool mNeedsCompact;
// needs to update active list
bool mNeedsActiveListRebuild;
};
namespace
{
const int kNumThreadsPerBlock = 256;
// writes data to the device depending on source type
void WriteDeviceData(void *dst, const void *src, size_t count, FlexMemory source)
{
hipMemcpyKind kind;
// host or device source
if (source == eFlexMemoryHost || source == eFlexMemoryHostAsync)
kind = hipMemcpyHostToDevice;
else
kind = hipMemcpyDeviceToDevice;
// synchronous or async copy
if (source == eFlexMemoryHostAsync || source == eFlexMemoryDeviceAsync)
{
cudaCheck(hipMemcpyAsync(dst, src, count, kind, 0));
}
else
{
cudaCheck(hipMemcpy(dst, src, count, kind));
}
}
// compacts all constraints into linear arrays
void CompactObjects(FlexExtContainer* c)
{
// rigids
c->mRigidOffsets.resize(1);
c->mRigidIndices.resize(0);
c->mRigidCoefficients.resize(0);
c->mRigidRotations.resize(0);
c->mRigidLocalPositions.resize(0);
c->mRigidTranslations.resize(0);
int totalNumSprings = 0;
int totalNumTris = 0;
// pre-calculate array sizes
for (size_t i=0; i < c->mInstances.size(); ++i)
{
FlexExtInstance* inst = c->mInstances[i];
const FlexExtAsset* asset = inst->mAsset;
// index into the triangle array for this instance
inst->mTriangleIndex = totalNumTris;
totalNumSprings += asset->mNumSprings;
totalNumTris += asset->mNumTriangles;
}
// springs
c->mSpringIndices.resize(totalNumSprings*2);
c->mSpringLengths.resize(totalNumSprings);
c->mSpringCoefficients.resize(totalNumSprings);
// cloth
c->mTriangleIndices.resize(totalNumTris*3);
c->mTriangleNormals.resize(totalNumTris);
// inflatables
c->mInflatableStarts.resize(0);
c->mInflatableCounts.resize(0);
c->mInflatableRestVolumes.resize(0);
c->mInflatableCoefficients.resize(0);
c->mInflatableOverPressures.resize(0);
int* __restrict dstSpringIndices = (totalNumSprings)?&c->mSpringIndices[0]:NULL;
float* __restrict dstSpringLengths = (totalNumSprings)?&c->mSpringLengths[0]:NULL;
float* __restrict dstSpringCoefficients = (totalNumSprings)?&c->mSpringCoefficients[0]:NULL;
int* __restrict dstTriangleIndices = (totalNumTris)?&c->mTriangleIndices[0]:NULL;
// go through each instance and update springs, rigids, etc
for (size_t i=0; i < c->mInstances.size(); ++i)
{
FlexExtInstance* inst = c->mInstances[i];
const FlexExtAsset* asset = inst->mAsset;
// map indices from the asset to the instance
const int* __restrict remap = &inst->mParticleIndices[0];
// flatten spring data
int numSprings = asset->mNumSprings;
const int numSpringIndices = asset->mNumSprings*2;
const int* __restrict srcSpringIndices = asset->mSpringIndices;
for (int i=0; i < numSpringIndices; ++i)
{
*dstSpringIndices = remap[*srcSpringIndices];
++dstSpringIndices;
++srcSpringIndices;
}
memcpy(dstSpringLengths, asset->mSpringRestLengths, numSprings*sizeof(float));
memcpy(dstSpringCoefficients, asset->mSpringCoefficients, numSprings*sizeof(float));
dstSpringLengths += numSprings;
dstSpringCoefficients += numSprings;
// rigids
if (asset->mRigidStiffness > 0.0f)
{
inst->mRigidIndex = int(c->mRigidOffsets.size())-1;
for (int i=0; i < asset->mNumParticles; ++i)
{
c->mRigidIndices.push_back(remap[i]);
c->mRigidLocalPositions.push_back(Vec3(&asset->mParticles[i*4])-Vec3(asset->mRigidCenter));
}
			// record the end offset of this rigid (also the start of the next one)

c->mRigidOffsets.push_back(int(c->mRigidIndices.size()));
c->mRigidCoefficients.push_back(asset->mRigidStiffness);
c->mRigidRotations.push_back(Matrix33::Identity());
c->mRigidTranslations.push_back(Vec3());
}
if (asset->mNumTriangles)
{
// triangles
const int numTriIndices = asset->mNumTriangles*3;
const int* __restrict srcTriIndices = asset->mTriangleIndices;
for (int i=0; i < numTriIndices; ++i)
{
*dstTriangleIndices = remap[*srcTriIndices];
++dstTriangleIndices;
++srcTriIndices;
}
if (asset->mInflatable)
{
c->mInflatableStarts.push_back(inst->mTriangleIndex);
c->mInflatableCounts.push_back(asset->mNumTriangles);
c->mInflatableRestVolumes.push_back(asset->mInflatableVolume);
c->mInflatableCoefficients.push_back(asset->mInflatableStiffness);
c->mInflatableOverPressures.push_back(asset->mInflatablePressure);
}
}
}
// springs
if (c->mSpringLengths.size())
flexSetSprings(c->mSolver, &c->mSpringIndices[0], &c->mSpringLengths[0], &c->mSpringCoefficients[0], int(c->mSpringLengths.size()), eFlexMemoryHostAsync);
else
flexSetSprings(c->mSolver, NULL, NULL, NULL, 0, eFlexMemoryHostAsync);
// rigids
if (c->mRigidCoefficients.size())
flexSetRigids(c->mSolver, &c->mRigidOffsets[0], &c->mRigidIndices[0], (float*)&c->mRigidLocalPositions[0], NULL, &c->mRigidCoefficients[0], (float*)&c->mRigidRotations[0], int(c->mRigidCoefficients.size()), eFlexMemoryHostAsync);
else
flexSetRigids(c->mSolver, NULL, NULL, NULL, NULL, NULL, NULL, 0, eFlexMemoryHostAsync);
// triangles
if (c->mTriangleIndices.size())
flexSetDynamicTriangles(c->mSolver, &c->mTriangleIndices[0], NULL, int(c->mTriangleIndices.size()/3), eFlexMemoryHostAsync);
else
flexSetDynamicTriangles(c->mSolver, NULL, NULL, 0, eFlexMemoryHostAsync);
// inflatables
if (c->mInflatableCounts.size())
flexSetInflatables(c->mSolver, &c->mInflatableStarts[0], &c->mInflatableCounts[0], &c->mInflatableRestVolumes[0], &c->mInflatableOverPressures[0], &c->mInflatableCoefficients[0], int(c->mInflatableCounts.size()),eFlexMemoryHost);
else
flexSetInflatables(c->mSolver, NULL, NULL, NULL, NULL, NULL, 0, eFlexMemoryHostAsync);
c->mNeedsCompact = false;
}
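	// Layout note (worked example, illustrative): mRigidOffsets is a CSR-style prefix
	// array over mRigidIndices. With two rigid instances of 3 and 5 particles the arrays
	// end up as mRigidOffsets = {0, 3, 8}, with mRigidIndices holding the 8 remapped
	// particle indices, so rigid r spans [mRigidOffsets[r], mRigidOffsets[r+1]).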
} // anonymous namespace
FlexExtContainer* flexExtCreateContainer(FlexSolver* solver, int maxParticles)
{
FlexExtContainer* c = new FlexExtContainer();
c->mSolver = solver;
c->mMaxParticles = maxParticles;
// initialize free list
c->mFreeList.resize(maxParticles);
for (int i=0; i < maxParticles; ++i)
c->mFreeList[i] = i;
c->mActiveList.resize(maxParticles);
c->mParticles.resize(maxParticles);
c->mVelocities.resize(maxParticles);
c->mPhases.resize(maxParticles);
c->mNormals.resize(maxParticles);
// force fields
c->mForceFieldsGpu = NULL;
c->mMaxForceFields = 0;
c->mNumForceFields = 0;
c->mTmpActiveIndicesGpu = NULL;
c->mTmpParticlesGpu = NULL;
c->mTmpVelocitiesGpu = NULL;
c->mMaxTmpParticles = 0;
c->mNeedsCompact = false;
return c;
}
void flexExtDestroyContainer(FlexExtContainer* c)
{
// force fields
cudaCheck(hipFree(c->mForceFieldsGpu));
cudaCheck(hipFree(c->mTmpActiveIndicesGpu));
cudaCheck(hipFree(c->mTmpParticlesGpu));
cudaCheck(hipFree(c->mTmpVelocitiesGpu));
delete c;
}
int flexExtAllocParticles(FlexExtContainer* c, int n, int* indices)
{
const int numToAlloc = Min(int(c->mFreeList.size()), n);
const int start = int(c->mFreeList.size())-numToAlloc;
if (numToAlloc)
{
memcpy(indices, &c->mFreeList[start], numToAlloc*sizeof(int));
c->mFreeList.resize(start);
}
c->mNeedsActiveListRebuild = true;
return numToAlloc;
}
void flexExtFreeParticles(FlexExtContainer* c, int n, const int* indices)
{
#if _DEBUG
for (int i=0; i < n; ++i)
{
// check valid values
assert(indices[i] >= 0 && indices[i] < int(c->mFreeList.capacity()));
// check for double delete
assert(std::find(c->mFreeList.begin(), c->mFreeList.end(), indices[i]) == c->mFreeList.end());
}
#endif
c->mFreeList.insert(c->mFreeList.end(), indices, indices+n);
c->mNeedsActiveListRebuild = true;
}
int flexExtGetActiveList(FlexExtContainer* c, int* indices)
{
int count = 0;
Bitmap inactive(c->mMaxParticles);
// create bitmap
for (size_t i=0; i < c->mFreeList.size(); ++i)
{
// if this fires then somehow a duplicate has ended up in the free list (double delete)
assert(!inactive.IsSet(c->mFreeList[i]));
inactive.Set(c->mFreeList[i]);
}
// iterate bitmap to find active elements
for (int i=0; i < c->mMaxParticles; ++i)
if (inactive.IsSet(i) == false)
indices[count++] = i;
return count;
}
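// Worked example (illustrative): with mMaxParticles = 8 and a free list containing
// {2, 5}, the bitmap marks indices 2 and 5 as inactive, so the function writes
// {0, 1, 3, 4, 6, 7} into 'indices' and returns 6.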
void flexExtGetParticleData(FlexExtContainer* c, float** particles, float** velocities, int** phases, float** normals)
{
if (particles && c->mParticles.size())
*particles = (float*)&c->mParticles[0];
if (velocities && c->mVelocities.size())
*velocities = (float*)&c->mVelocities[0];
if (phases && c->mPhases.size())
*phases = (int*)&c->mPhases[0];
if (normals && c->mNormals.size())
*normals = (float*)&c->mNormals[0];
}
void flexExtGetTriangleData(FlexExtContainer* c, int** indices, float** normals)
{
if (indices && c->mTriangleIndices.size())
*indices = &c->mTriangleIndices[0];
if (normals && c->mTriangleNormals.size())
*normals = (float*)&c->mTriangleNormals[0];
}
void flexExtGetRigidData(FlexExtContainer* c, float** rotations, float** positions)
{
if (rotations && c->mRigidRotations.size())
*rotations = (float*)&c->mRigidRotations[0];
if (positions && c->mRigidTranslations.size())
*positions = (float*)&c->mRigidTranslations[0];
}
FlexExtInstance* flexExtCreateInstance(FlexExtContainer* c, const FlexExtAsset* asset, const float* transform, float vx, float vy, float vz, int phase, float invMassScale)
{
const int numParticles = asset->mNumParticles;
// check if asset will fit
if (int(c->mFreeList.size()) < numParticles)
return NULL;
FlexExtInstance* inst = new FlexExtInstance();
inst->mAsset = asset;
inst->mTriangleIndex = -1;
inst->mRigidIndex = -1;
inst->mInflatableIndex = -1;
inst->mUserData = NULL;
inst->mNumParticles = numParticles;
// allocate particles for instance
inst->mParticleIndices = new int[numParticles];
int n = flexExtAllocParticles(c, numParticles, &inst->mParticleIndices[0]);
assert(n == numParticles);
(void)n;
c->mInstances.push_back(inst);
const Matrix44 xform(transform);
for (int i=0; i < numParticles; ++i)
{
const int index = inst->mParticleIndices[i];
// add transformed particles to the container
c->mParticles[index] = xform*Vec4(Vec3(&asset->mParticles[i*4]), 1.0f);
c->mParticles[index].w = asset->mParticles[i*4+3]*invMassScale;
c->mVelocities[index] = Vec3(vx, vy, vz);
c->mPhases[index] = phase;
c->mNormals[index] = Vec4(0.0f);
}
c->mNeedsCompact = true;
c->mNeedsActiveListRebuild = true;
return inst;
}
void flexExtDestroyInstance(FlexExtContainer* c, const FlexExtInstance* inst)
{
flexExtFreeParticles(c, inst->mNumParticles, &inst->mParticleIndices[0]);
delete[] inst->mParticleIndices;
// TODO: O(N) remove
std::vector<FlexExtInstance*>::iterator iter = std::find(c->mInstances.begin(), c->mInstances.end(), inst);
assert(iter != c->mInstances.end());
c->mInstances.erase(iter);
c->mNeedsCompact = true;
c->mNeedsActiveListRebuild = true;
delete inst;
}
void flexExtTickContainer(FlexExtContainer* c, float dt, int substeps, FlexTimers* timers)
{
// update the device
flexExtPushToDevice(c);
// update solver
flexUpdateSolver(c->mSolver, dt, substeps, timers);
// update host
flexExtPullFromDevice(c);
// ensure memory transfers have finished
flexSetFence();
flexWaitFence();
}
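// Usage sketch (illustrative only, not part of the original API surface): a minimal
// spawn/step/teardown cycle built on the container functions above. The solver and
// asset are assumed to have been created elsewhere (e.g. via flexCreateSolver and
// flexExtCreateClothFromMesh); the 65536 particle budget and two substeps are
// arbitrary choices.
static void flexExtExampleTick(FlexSolver* solver, const FlexExtAsset* asset, float dt)
{
	FlexExtContainer* container = flexExtCreateContainer(solver, 65536);

	// identity transform, zero initial velocity, phase 0, unscaled inverse mass
	const float transform[16] = { 1.0f, 0.0f, 0.0f, 0.0f,
	                              0.0f, 1.0f, 0.0f, 0.0f,
	                              0.0f, 0.0f, 1.0f, 0.0f,
	                              0.0f, 0.0f, 0.0f, 1.0f };

	FlexExtInstance* instance = flexExtCreateInstance(container, asset, transform, 0.0f, 0.0f, 0.0f, 0, 1.0f);

	if (instance)
	{
		// one frame: push host data, run the solver, read results back
		flexExtTickContainer(container, dt, 2, NULL);

		flexExtDestroyInstance(container, instance);
	}

	flexExtDestroyContainer(container);
}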
void flexExtPushToDevice(FlexExtContainer* c)
{
if (c->mNeedsActiveListRebuild)
{
// update active list
int n = flexExtGetActiveList(c, &c->mActiveList[0]);
flexSetActive(c->mSolver, &c->mActiveList[0], n, eFlexMemoryHostAsync);
c->mNeedsActiveListRebuild = false;
}
	// push any changes to the solver
flexSetParticles(c->mSolver, (float*)&c->mParticles[0], int(c->mParticles.size()), eFlexMemoryHostAsync);
flexSetVelocities(c->mSolver, (float*)&c->mVelocities[0], int(c->mVelocities.size()), eFlexMemoryHostAsync);
flexSetPhases(c->mSolver, &c->mPhases[0], int(c->mPhases.size()), eFlexMemoryHostAsync);
flexSetNormals(c->mSolver, (float*)&c->mNormals[0], int(c->mNormals.size()), eFlexMemoryHostAsync);
if (c->mNeedsCompact)
CompactObjects(c);
}
void flexExtPullFromDevice(FlexExtContainer* c)
{
// read back particle data
flexGetParticles(c->mSolver, (float*)&c->mParticles[0], int(c->mParticles.size()), eFlexMemoryHostAsync);
flexGetVelocities(c->mSolver, (float*)&c->mVelocities[0], int(c->mVelocities.size()), eFlexMemoryHostAsync);
flexGetPhases(c->mSolver, &c->mPhases[0], int(c->mPhases.size()), eFlexMemoryHostAsync);
flexGetNormals(c->mSolver, (float*)&c->mNormals[0], int(c->mNormals.size()), eFlexMemoryHostAsync);
// read back rigid transforms
if (c->mRigidCoefficients.size())
flexGetRigidTransforms(c->mSolver, (float*)&c->mRigidRotations[0], (float*)&c->mRigidTranslations[0], eFlexMemoryHostAsync);
}
namespace
{
struct Key
{
Key(int i, float d) : index(i), depth(d) {}
int index;
float depth;
bool operator < (const Key& rhs) const { return depth < rhs.depth; }
};
}
int flexExtCreateWeldedMeshIndices(const float* vertices, int numVertices, int* uniqueIndices, int* originalToUniqueMap, float threshold)
{
memset(originalToUniqueMap, -1, numVertices*sizeof(int));
const Vec3* positions = (const Vec3*)vertices;
// use a sweep and prune style search to accelerate neighbor finding
std::vector<Key> keys;
for (int i=0; i < numVertices; i++)
keys.push_back(Key(i, positions[i].z));
std::sort(keys.begin(), keys.end());
int uniqueCount = 0;
// sweep keys to find matching verts
for (int i=0; i < numVertices; ++i)
{
// we are a duplicate, skip
if (originalToUniqueMap[keys[i].index] != -1)
continue;
// scan forward until no vertex can be closer than threshold
for (int j=i+1; j < numVertices && (keys[j].depth-keys[i].depth) <= threshold; ++j)
{
float distance = Length(Vector3(positions[keys[i].index])-Vector3(positions[keys[j].index]));
if (distance <= threshold)
originalToUniqueMap[keys[j].index] = uniqueCount;
}
originalToUniqueMap[keys[i].index] = uniqueCount;
uniqueIndices[uniqueCount++] = keys[i].index;
}
return uniqueCount;
}
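// Usage sketch (illustrative; the 1mm threshold and the caller-owned buffers are
// assumptions): weld vertices that lie within 'threshold' of each other, then remap
// triangle indices so they reference the welded (unique) vertex set.
static void flexExtExampleWeldMesh(const float* vertices, int numVertices, int* triangleIndices, int numTriangleIndices)
{
	std::vector<int> uniqueIndices(numVertices);
	std::vector<int> originalToUnique(numVertices);

	const float threshold = 0.001f;
	const int uniqueCount = flexExtCreateWeldedMeshIndices(vertices, numVertices, &uniqueIndices[0], &originalToUnique[0], threshold);
	(void)uniqueCount;

	// every triangle index now refers to a representative vertex in the welded set
	for (int i = 0; i < numTriangleIndices; ++i)
		triangleIndices[i] = originalToUnique[triangleIndices[i]];
}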
namespace
{
float SampleSDF(const float* sdf, int dim, int x, int y, int z)
{
assert(x < dim && x >= 0);
assert(y < dim && y >= 0);
assert(z < dim && z >= 0);
return sdf[z*dim*dim + y*dim + x];
}
// return normal of signed distance field
Vec3 SampleSDFGrad(const float* sdf, int dim, int x, int y, int z)
{
int x0 = ::max(x-1, 0);
int x1 = ::min(x+1, dim-1);
int y0 = ::max(y-1, 0);
int y1 = ::min(y+1, dim-1);
int z0 = ::max(z-1, 0);
int z1 = ::min(z+1, dim-1);
float dx = (SampleSDF(sdf, dim, x1, y, z) - SampleSDF(sdf, dim, x0, y, z))*(dim*0.5f);
float dy = (SampleSDF(sdf, dim, x, y1, z) - SampleSDF(sdf, dim, x, y0, z))*(dim*0.5f);
float dz = (SampleSDF(sdf, dim, x, y, z1) - SampleSDF(sdf, dim, x, y, z0))*(dim*0.5f);
return Vec3(dx, dy, dz);
}
} // anonymous namespace
FlexExtAsset* flexExtCreateRigidFromMesh(const float* vertices, int numVertices, const int* indices, int numTriangleIndices, float spacing)
{
std::vector<Vec4> particles;
std::vector<Vec4> normals;
std::vector<int> phases;
const Vec3* positions = (Vec3*)vertices;
Vec3 meshLower(FLT_MAX), meshUpper(-FLT_MAX);
for (int i=0; i < numVertices; ++i)
{
meshLower = Min(meshLower, positions[i]);
meshUpper = Max(meshUpper, positions[i]);
}
Vec3 edges = meshUpper-meshLower;
float maxEdge = ::max(::max(edges.x, edges.y), edges.z);
	// tweak spacing to avoid edge cases for particles lying on the boundary
// just covers the case where an edge is a whole multiple of the spacing.
float spacingEps = spacing*(1.0f - 1e-4f);
// make sure to have at least one particle in each dimension
int dx, dy, dz;
dx = spacing > edges.x ? 1 : int(edges.x/spacingEps);
dy = spacing > edges.y ? 1 : int(edges.y/spacingEps);
dz = spacing > edges.z ? 1 : int(edges.z/spacingEps);
int maxDim = ::max(::max(dx, dy), dz);
// expand border by two voxels to ensure adequate sampling at edges
meshLower -= 2.0f*Vec3(spacing);
meshUpper += 2.0f*Vec3(spacing);
maxDim += 4;
// we shift the voxelization bounds so that the voxel centers
// lie symmetrically to the center of the object. this reduces the
// chance of missing features, and also better aligns the particles
// with the mesh
Vec3 meshOffset;
meshOffset.x = 0.5f * (spacing - (edges.x - (dx-1)*spacing));
meshOffset.y = 0.5f * (spacing - (edges.y - (dy-1)*spacing));
meshOffset.z = 0.5f * (spacing - (edges.z - (dz-1)*spacing));
meshLower -= meshOffset;
// don't allow samplings with > 64 per-side
if (maxDim > 64)
return NULL;
std::vector<uint32_t> voxels(maxDim*maxDim*maxDim);
Voxelize(vertices, numVertices, indices, numTriangleIndices, maxDim, maxDim, maxDim, &voxels[0], meshLower, meshLower + Vec3(maxDim*spacing));
std::vector<float> sdf(maxDim*maxDim*maxDim);
MakeSDF(&voxels[0], maxDim, maxDim, maxDim, &sdf[0]);
Vec3 center;
for (int x=0; x < maxDim; ++x)
{
for (int y=0; y < maxDim; ++y)
{
for (int z=0; z < maxDim; ++z)
{
const int index = z*maxDim*maxDim + y*maxDim + x;
				// if voxel is marked as occupied then add a particle
if (voxels[index])
{
Vec3 position = meshLower + spacing*Vec3(float(x) + 0.5f, float(y) + 0.5f, float(z) + 0.5f);
// normalize the sdf value and transform to world scale
Vec3 n = SafeNormalize(SampleSDFGrad(&sdf[0], maxDim, x, y, z));
float d = sdf[index]*maxEdge;
normals.push_back(Vec4(n, d));
particles.push_back(Vec4(position.x, position.y, position.z, 1.0f));
phases.push_back(0);
center += position;
}
}
}
}
FlexExtAsset* asset = new FlexExtAsset();
	memset(asset, 0, sizeof(FlexExtAsset));
if (particles.size())
{
// store center of mass
center /= float(particles.size());
asset->mRigidCenter[0] = center.x;
asset->mRigidCenter[1] = center.y;
asset->mRigidCenter[2] = center.z;
asset->mNumParticles = int(particles.size());
asset->mParticles = new float[particles.size()*4];
memcpy(asset->mParticles, &particles[0], sizeof(Vec4)*particles.size());
// todo: normals
}
return asset;
}
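// Sizing example (illustrative): for a 1m cube sampled with spacing = 0.1f the edge
// lengths are 1.0, so dx = dy = dz = 10 and maxDim = 10 + 4 = 14 voxels per side,
// well under the 64-per-side limit; a spacing below roughly 1.0f/61 (~0.0164f) would
// push maxDim past 64 and make the function return NULL.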
FlexExtAsset* flexExtCreateClothFromMesh(const float* particles, int numVertices, const int* indices, int numTriangles, float stretchStiffness, float bendStiffness, float tetherStiffness, float tetherGive, float pressure)
{
FlexExtAsset* asset = new FlexExtAsset();
	memset(asset, 0, sizeof(FlexExtAsset));
	asset->mParticles = new float[numVertices*4];
	memcpy(asset->mParticles, particles, numVertices*4*sizeof(float));
asset->mTriangleIndices = new int[numTriangles*3];
memcpy(asset->mTriangleIndices, indices, numTriangles*3*sizeof(int));
asset->mNumParticles = numVertices;
asset->mNumTriangles = numTriangles;
// create cloth mesh
ClothMesh cloth((Vec4*)particles, numVertices, indices, numTriangles*3, stretchStiffness, bendStiffness, true);
if (cloth.mValid)
{
// create tethers
if (tetherStiffness > 0.0f)
{
std::vector<int> anchors;
anchors.reserve(numVertices);
// find anchors
for (int i=0; i < numVertices; ++i)
{
Vec4& particle = ((Vec4*)particles)[i];
if (particle.w == 0.0f)
anchors.push_back(i);
}
if (anchors.size())
{
// create tethers
for (int i=0; i < numVertices; ++i)
{
Vec4& particle = ((Vec4*)particles)[i];
if (particle.w == 0.0f)
continue;
float minSqrDist = FLT_MAX;
int minIndex = -1;
// find the closest attachment point
for (int a=0; a < int(anchors.size()); ++a)
{
Vec4& attachment = ((Vec4*)particles)[anchors[a]];
float distSqr = LengthSq(Vec3(particle)-Vec3(attachment));
if (distSqr < minSqrDist)
{
minSqrDist = distSqr;
minIndex = anchors[a];
}
}
// add a tether
if (minIndex != -1)
{
cloth.mConstraintIndices.push_back(i);
cloth.mConstraintIndices.push_back(minIndex);
cloth.mConstraintRestLengths.push_back(sqrtf(minSqrDist)*(1.0f + tetherGive));
// negative stiffness indicates tether (unilateral constraint)
cloth.mConstraintCoefficients.push_back(-tetherStiffness);
}
}
}
}
const int numSprings = int(cloth.mConstraintCoefficients.size());
asset->mSpringIndices = new int[numSprings*2];
asset->mSpringCoefficients = new float[numSprings];
asset->mSpringRestLengths = new float[numSprings];
asset->mNumSprings = numSprings;
for (int i=0; i < numSprings; ++i)
{
asset->mSpringIndices[i*2+0] = cloth.mConstraintIndices[i*2+0];
asset->mSpringIndices[i*2+1] = cloth.mConstraintIndices[i*2+1];
asset->mSpringRestLengths[i] = cloth.mConstraintRestLengths[i];
asset->mSpringCoefficients[i] = cloth.mConstraintCoefficients[i];
}
if (pressure > 0.0f)
{
asset->mInflatable = true;
asset->mInflatableVolume = cloth.mRestVolume;
asset->mInflatableStiffness = cloth.mConstraintScale;
asset->mInflatablePressure = pressure;
}
}
else
{
flexExtDestroyAsset(asset);
return NULL;
}
return asset;
}
void flexExtDestroyAsset(FlexExtAsset* asset)
{
delete[] asset->mParticles;
delete[] asset->mSpringIndices;
delete[] asset->mSpringCoefficients;
delete[] asset->mSpringRestLengths;
delete[] asset->mTriangleIndices;
delete asset;
}
void flexExtSetForceFields(FlexExtContainer* c, const FlexExtForceField* forceFields, int numForceFields, FlexMemory source)
{
// re-alloc if necessary
if (numForceFields > c->mMaxForceFields)
{
cudaCheck(hipFree(c->mForceFieldsGpu));
cudaCheck(hipMalloc(&c->mForceFieldsGpu, sizeof(FlexExtForceField)*numForceFields));
c->mMaxForceFields = numForceFields;
}
c->mNumForceFields = numForceFields;
if (numForceFields > 0)
{
WriteDeviceData(c->mForceFieldsGpu, forceFields, numForceFields*sizeof(FlexExtForceField), source);
}
}
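// Usage sketch (illustrative): a single radial impulse field applied for one frame.
// The member names follow their uses in UpdateForceFields below; the radius and
// strength values and the one-shot call pattern are assumptions, not part of the
// original file.
static void flexExtExampleRadialImpulse(FlexExtContainer* container, float x, float y, float z, float dt)
{
	FlexExtForceField field;
	field.mPosition[0] = x;
	field.mPosition[1] = y;
	field.mPosition[2] = z;
	field.mRadius = 2.0f;
	field.mStrength = 30.0f;
	field.mLinearFalloff = true;
	field.mMode = eFlexExtModeImpulse;

	flexExtSetForceFields(container, &field, 1, eFlexMemoryHost);
	flexExtApplyForceFields(container, dt);
}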
__global__ void UpdateForceFields(int numParticles, const int* __restrict__ activeIndices, const Vec4* __restrict__ positions, Vec3* __restrict__ velocities, const FlexExtForceField* __restrict__ forceFields, int numForceFields, float dt)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
for (int f = 0; f < numForceFields; f++)
{
const FlexExtForceField& forceField = forceFields[f];
if (i < numParticles)
{
const int index = activeIndices[i];
Vec4 p = positions[index];
Vec3 v = velocities[index];
Vec3 localPos = Vec3(p.x, p.y, p.z) - Vec3(forceField.mPosition[0], forceField.mPosition[1], forceField.mPosition[2]);
float length = Length(localPos);
if (length > forceField.mRadius)
{
continue;
}
Vec3 fieldDir;
if (length > 0.0f)
{
fieldDir = localPos / length;
}
else
{
fieldDir = localPos;
}
// If using linear falloff, scale with distance.
float fieldStrength = forceField.mStrength;
if (forceField.mLinearFalloff)
{
fieldStrength *= (1.0f - (length / forceField.mRadius));
}
			// Convert the field strength into a velocity change according to the field mode
			float unitMultiplier = 0.0f;
if (forceField.mMode == eFlexExtModeForce)
{
unitMultiplier = dt * p.w; // time/mass
}
else if (forceField.mMode == eFlexExtModeImpulse)
{
unitMultiplier = p.w; // 1/mass
}
else if (forceField.mMode == eFlexExtModeVelocityChange)
{
unitMultiplier = 1.0f;
}
Vec3 deltaVelocity = fieldDir * fieldStrength * unitMultiplier;
velocities[index] = v + deltaVelocity;
}
}
}
void flexExtApplyForceFields(FlexExtContainer* c, float dt)
{
int numParticles = flexGetActiveCount(c->mSolver);
if (numParticles && c->mNumForceFields)
{
// reallocate temp buffers if necessary
if (int(c->mParticles.size()) > c->mMaxTmpParticles)
{
c->mMaxTmpParticles = int(c->mParticles.size());
cudaCheck(hipFree(c->mTmpActiveIndicesGpu));
cudaCheck(hipFree(c->mTmpParticlesGpu));
cudaCheck(hipFree(c->mTmpVelocitiesGpu));
cudaCheck(hipMalloc(&c->mTmpActiveIndicesGpu, sizeof(int)*c->mMaxTmpParticles));
cudaCheck(hipMalloc(&c->mTmpParticlesGpu, sizeof(Vec4)*c->mMaxTmpParticles));
cudaCheck(hipMalloc(&c->mTmpVelocitiesGpu, sizeof(Vec3)*c->mMaxTmpParticles));
}
flexGetActive(c->mSolver, c->mTmpActiveIndicesGpu, eFlexMemoryDeviceAsync);
flexGetParticles(c->mSolver, (float*)c->mTmpParticlesGpu, int(c->mParticles.size()), eFlexMemoryDeviceAsync);
flexGetVelocities(c->mSolver, (float*)c->mTmpVelocitiesGpu, int(c->mParticles.size()), eFlexMemoryDeviceAsync);
const int kNumBlocks = (numParticles+kNumThreadsPerBlock-1)/kNumThreadsPerBlock;
hipLaunchKernelGGL(( UpdateForceFields), dim3(kNumBlocks), dim3(kNumThreadsPerBlock), 0, 0,
numParticles,
c->mTmpActiveIndicesGpu,
c->mTmpParticlesGpu,
c->mTmpVelocitiesGpu,
c->mForceFieldsGpu,
c->mNumForceFields,
dt
);
flexSetVelocities(c->mSolver, (float*)c->mTmpVelocitiesGpu, int(c->mParticles.size()), eFlexMemoryDeviceAsync);
}
}
| fe5d6805223fdfb7006c29fb71d1289bcad24c37.cu | // This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2013-2015 NVIDIA Corporation. All rights reserved.
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include <limits>
#include <algorithm>
#include "../core/maths.h"
#include "../core/voxelize.h"
#include "../core/sdf.h"
#include "../include/flex.h"
#include "../demo/cloth.h"
#include "flexExt.h"
#define cudaCheck(x) { cudaError_t err = x; if (err != cudaSuccess) { printf("Cuda error: %d in %s at %s:%d\n", err, #x, __FILE__, __LINE__); assert(0); } }
class Bitmap
{
public:
typedef unsigned int Word;
static const int kWordSize = sizeof(Word)*8;
Bitmap(int numBits) : mBits((numBits+kWordSize-1)/kWordSize)
{
}
inline void Set(int bit)
{
const int wordIndex = bit/kWordSize;
const int bitIndex = bit&(kWordSize-1);
const Word word = mBits[wordIndex];
mBits[wordIndex] = word|(1<<bitIndex);
}
inline void Reset(int bit)
{
const int wordIndex = bit/kWordSize;
const int bitIndex = bit&(kWordSize-1);
const Word word = mBits[wordIndex];
mBits[wordIndex] = word&~(1<<bitIndex);
}
inline bool IsSet(int bit)
{
const int wordIndex = bit/kWordSize;
const int bitIndex = bit&(kWordSize-1);
const Word word = mBits[wordIndex];
return (word & (1<<bitIndex)) != 0;
}
private:
std::vector<Word> mBits;
};
// std::allocator compatible allocator for containers using pinned host memory (via flexAlloc)
template <class T>
struct PinnedAllocator
{
// typedefs
typedef T value_type;
typedef value_type* pointer;
typedef const value_type* const_pointer;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
inline explicit PinnedAllocator() {}
inline ~PinnedAllocator() {}
inline explicit PinnedAllocator(PinnedAllocator const&) {}
template<typename U>
inline explicit PinnedAllocator(PinnedAllocator<U> const&) {}
inline size_type max_size() const
{
return std::numeric_limits<size_type>::max();
}
T* allocate(std::size_t n)
{
return (T*)flexAlloc(int(n*sizeof(T)));
}
void deallocate(T* p, std::size_t n)
{
flexFree(p);
}
public :
// convert an allocator<T> to allocator<U>
template<typename U>
struct rebind {
typedef PinnedAllocator<U> other;
};
// construction/destruction
inline void construct(pointer p, const T& t) { new(p) T(t); }
inline void destroy(pointer p) { p->~T(); }
};
template <class T, class U>
bool operator==(const PinnedAllocator<T>&, const PinnedAllocator<U>&);
template <class T, class U>
bool operator!=(const PinnedAllocator<T>&, const PinnedAllocator<U>&);
struct FlexExtContainer
{
int mMaxParticles;
FlexSolver* mSolver;
// first n indices
std::vector<int, PinnedAllocator<int> > mActiveList;
std::vector<int> mFreeList;
std::vector<FlexExtInstance*> mInstances;
// particles
std::vector<Vec4, PinnedAllocator<Vec4> > mParticles;
std::vector<Vec3, PinnedAllocator<Vec3> > mVelocities;
std::vector<int, PinnedAllocator<int> > mPhases;
std::vector<Vec4, PinnedAllocator<Vec4> > mNormals;
// rigids
std::vector<int> mRigidOffsets;
std::vector<int> mRigidIndices;
std::vector<float> mRigidCoefficients;
std::vector<Matrix33, PinnedAllocator<Vec3> > mRigidRotations;
std::vector<Vec3, PinnedAllocator<Vec3> > mRigidTranslations;
std::vector<Vec3, PinnedAllocator<Vec3> > mRigidLocalPositions;
// springs
std::vector<int, PinnedAllocator<int> > mSpringIndices;
std::vector<float, PinnedAllocator<float> > mSpringLengths;
std::vector<float, PinnedAllocator<float> > mSpringCoefficients;
// cloth
std::vector<int, PinnedAllocator<int> > mTriangleIndices;
std::vector<Vec3, PinnedAllocator<Vec3> > mTriangleNormals;
std::vector<int> mInflatableStarts;
std::vector<int> mInflatableCounts;
std::vector<float> mInflatableRestVolumes;
std::vector<float> mInflatableCoefficients;
std::vector<float> mInflatableOverPressures;
//force fields
FlexExtForceField* mForceFieldsGpu;
int mMaxForceFields;
int mNumForceFields;
int* mTmpActiveIndicesGpu;
Vec4* mTmpParticlesGpu;
Vec3* mTmpVelocitiesGpu;
int mMaxTmpParticles;
// needs compact
bool mNeedsCompact;
// needs to update active list
bool mNeedsActiveListRebuild;
};
namespace
{
const int kNumThreadsPerBlock = 256;
// writes data to the device depending on source type
void WriteDeviceData(void *dst, const void *src, size_t count, FlexMemory source)
{
cudaMemcpyKind kind;
// host or device source
if (source == eFlexMemoryHost || source == eFlexMemoryHostAsync)
kind = cudaMemcpyHostToDevice;
else
kind = cudaMemcpyDeviceToDevice;
// synchronous or async copy
if (source == eFlexMemoryHostAsync || source == eFlexMemoryDeviceAsync)
{
cudaCheck(cudaMemcpyAsync(dst, src, count, kind, 0));
}
else
{
cudaCheck(cudaMemcpy(dst, src, count, kind));
}
}
// compacts all constraints into linear arrays
void CompactObjects(FlexExtContainer* c)
{
// rigids
c->mRigidOffsets.resize(1);
c->mRigidIndices.resize(0);
c->mRigidCoefficients.resize(0);
c->mRigidRotations.resize(0);
c->mRigidLocalPositions.resize(0);
c->mRigidTranslations.resize(0);
int totalNumSprings = 0;
int totalNumTris = 0;
// pre-calculate array sizes
for (size_t i=0; i < c->mInstances.size(); ++i)
{
FlexExtInstance* inst = c->mInstances[i];
const FlexExtAsset* asset = inst->mAsset;
// index into the triangle array for this instance
inst->mTriangleIndex = totalNumTris;
totalNumSprings += asset->mNumSprings;
totalNumTris += asset->mNumTriangles;
}
// springs
c->mSpringIndices.resize(totalNumSprings*2);
c->mSpringLengths.resize(totalNumSprings);
c->mSpringCoefficients.resize(totalNumSprings);
// cloth
c->mTriangleIndices.resize(totalNumTris*3);
c->mTriangleNormals.resize(totalNumTris);
// inflatables
c->mInflatableStarts.resize(0);
c->mInflatableCounts.resize(0);
c->mInflatableRestVolumes.resize(0);
c->mInflatableCoefficients.resize(0);
c->mInflatableOverPressures.resize(0);
int* __restrict dstSpringIndices = (totalNumSprings)?&c->mSpringIndices[0]:NULL;
float* __restrict dstSpringLengths = (totalNumSprings)?&c->mSpringLengths[0]:NULL;
float* __restrict dstSpringCoefficients = (totalNumSprings)?&c->mSpringCoefficients[0]:NULL;
int* __restrict dstTriangleIndices = (totalNumTris)?&c->mTriangleIndices[0]:NULL;
// go through each instance and update springs, rigids, etc
for (size_t i=0; i < c->mInstances.size(); ++i)
{
FlexExtInstance* inst = c->mInstances[i];
const FlexExtAsset* asset = inst->mAsset;
// map indices from the asset to the instance
const int* __restrict remap = &inst->mParticleIndices[0];
// flatten spring data
int numSprings = asset->mNumSprings;
const int numSpringIndices = asset->mNumSprings*2;
const int* __restrict srcSpringIndices = asset->mSpringIndices;
for (int i=0; i < numSpringIndices; ++i)
{
*dstSpringIndices = remap[*srcSpringIndices];
++dstSpringIndices;
++srcSpringIndices;
}
memcpy(dstSpringLengths, asset->mSpringRestLengths, numSprings*sizeof(float));
memcpy(dstSpringCoefficients, asset->mSpringCoefficients, numSprings*sizeof(float));
dstSpringLengths += numSprings;
dstSpringCoefficients += numSprings;
// rigids
if (asset->mRigidStiffness > 0.0f)
{
inst->mRigidIndex = int(c->mRigidOffsets.size())-1;
for (int i=0; i < asset->mNumParticles; ++i)
{
c->mRigidIndices.push_back(remap[i]);
c->mRigidLocalPositions.push_back(Vec3(&asset->mParticles[i*4])-Vec3(asset->mRigidCenter));
}
			// record the end offset of this rigid (also the start of the next one)
c->mRigidOffsets.push_back(int(c->mRigidIndices.size()));
c->mRigidCoefficients.push_back(asset->mRigidStiffness);
c->mRigidRotations.push_back(Matrix33::Identity());
c->mRigidTranslations.push_back(Vec3());
}
if (asset->mNumTriangles)
{
// triangles
const int numTriIndices = asset->mNumTriangles*3;
const int* __restrict srcTriIndices = asset->mTriangleIndices;
for (int i=0; i < numTriIndices; ++i)
{
*dstTriangleIndices = remap[*srcTriIndices];
++dstTriangleIndices;
++srcTriIndices;
}
if (asset->mInflatable)
{
c->mInflatableStarts.push_back(inst->mTriangleIndex);
c->mInflatableCounts.push_back(asset->mNumTriangles);
c->mInflatableRestVolumes.push_back(asset->mInflatableVolume);
c->mInflatableCoefficients.push_back(asset->mInflatableStiffness);
c->mInflatableOverPressures.push_back(asset->mInflatablePressure);
}
}
}
// springs
if (c->mSpringLengths.size())
flexSetSprings(c->mSolver, &c->mSpringIndices[0], &c->mSpringLengths[0], &c->mSpringCoefficients[0], int(c->mSpringLengths.size()), eFlexMemoryHostAsync);
else
flexSetSprings(c->mSolver, NULL, NULL, NULL, 0, eFlexMemoryHostAsync);
// rigids
if (c->mRigidCoefficients.size())
flexSetRigids(c->mSolver, &c->mRigidOffsets[0], &c->mRigidIndices[0], (float*)&c->mRigidLocalPositions[0], NULL, &c->mRigidCoefficients[0], (float*)&c->mRigidRotations[0], int(c->mRigidCoefficients.size()), eFlexMemoryHostAsync);
else
flexSetRigids(c->mSolver, NULL, NULL, NULL, NULL, NULL, NULL, 0, eFlexMemoryHostAsync);
// triangles
if (c->mTriangleIndices.size())
flexSetDynamicTriangles(c->mSolver, &c->mTriangleIndices[0], NULL, int(c->mTriangleIndices.size()/3), eFlexMemoryHostAsync);
else
flexSetDynamicTriangles(c->mSolver, NULL, NULL, 0, eFlexMemoryHostAsync);
// inflatables
if (c->mInflatableCounts.size())
flexSetInflatables(c->mSolver, &c->mInflatableStarts[0], &c->mInflatableCounts[0], &c->mInflatableRestVolumes[0], &c->mInflatableOverPressures[0], &c->mInflatableCoefficients[0], int(c->mInflatableCounts.size()),eFlexMemoryHost);
else
flexSetInflatables(c->mSolver, NULL, NULL, NULL, NULL, NULL, 0, eFlexMemoryHostAsync);
c->mNeedsCompact = false;
}
} // anonymous namespace
FlexExtContainer* flexExtCreateContainer(FlexSolver* solver, int maxParticles)
{
FlexExtContainer* c = new FlexExtContainer();
c->mSolver = solver;
c->mMaxParticles = maxParticles;
// initialize free list
c->mFreeList.resize(maxParticles);
for (int i=0; i < maxParticles; ++i)
c->mFreeList[i] = i;
c->mActiveList.resize(maxParticles);
c->mParticles.resize(maxParticles);
c->mVelocities.resize(maxParticles);
c->mPhases.resize(maxParticles);
c->mNormals.resize(maxParticles);
// force fields
c->mForceFieldsGpu = NULL;
c->mMaxForceFields = 0;
c->mNumForceFields = 0;
c->mTmpActiveIndicesGpu = NULL;
c->mTmpParticlesGpu = NULL;
c->mTmpVelocitiesGpu = NULL;
c->mMaxTmpParticles = 0;
c->mNeedsCompact = false;
return c;
}
void flexExtDestroyContainer(FlexExtContainer* c)
{
// force fields
cudaCheck(cudaFree(c->mForceFieldsGpu));
cudaCheck(cudaFree(c->mTmpActiveIndicesGpu));
cudaCheck(cudaFree(c->mTmpParticlesGpu));
cudaCheck(cudaFree(c->mTmpVelocitiesGpu));
delete c;
}
int flexExtAllocParticles(FlexExtContainer* c, int n, int* indices)
{
const int numToAlloc = Min(int(c->mFreeList.size()), n);
const int start = int(c->mFreeList.size())-numToAlloc;
if (numToAlloc)
{
memcpy(indices, &c->mFreeList[start], numToAlloc*sizeof(int));
c->mFreeList.resize(start);
}
c->mNeedsActiveListRebuild = true;
return numToAlloc;
}
void flexExtFreeParticles(FlexExtContainer* c, int n, const int* indices)
{
#if _DEBUG
for (int i=0; i < n; ++i)
{
// check valid values
assert(indices[i] >= 0 && indices[i] < int(c->mFreeList.capacity()));
// check for double delete
assert(std::find(c->mFreeList.begin(), c->mFreeList.end(), indices[i]) == c->mFreeList.end());
}
#endif
c->mFreeList.insert(c->mFreeList.end(), indices, indices+n);
c->mNeedsActiveListRebuild = true;
}
int flexExtGetActiveList(FlexExtContainer* c, int* indices)
{
int count = 0;
Bitmap inactive(c->mMaxParticles);
// create bitmap
for (size_t i=0; i < c->mFreeList.size(); ++i)
{
// if this fires then somehow a duplicate has ended up in the free list (double delete)
assert(!inactive.IsSet(c->mFreeList[i]));
inactive.Set(c->mFreeList[i]);
}
// iterate bitmap to find active elements
for (int i=0; i < c->mMaxParticles; ++i)
if (inactive.IsSet(i) == false)
indices[count++] = i;
return count;
}
void flexExtGetParticleData(FlexExtContainer* c, float** particles, float** velocities, int** phases, float** normals)
{
if (particles && c->mParticles.size())
*particles = (float*)&c->mParticles[0];
if (velocities && c->mVelocities.size())
*velocities = (float*)&c->mVelocities[0];
if (phases && c->mPhases.size())
*phases = (int*)&c->mPhases[0];
if (normals && c->mNormals.size())
*normals = (float*)&c->mNormals[0];
}
void flexExtGetTriangleData(FlexExtContainer* c, int** indices, float** normals)
{
if (indices && c->mTriangleIndices.size())
*indices = &c->mTriangleIndices[0];
if (normals && c->mTriangleNormals.size())
*normals = (float*)&c->mTriangleNormals[0];
}
void flexExtGetRigidData(FlexExtContainer* c, float** rotations, float** positions)
{
if (rotations && c->mRigidRotations.size())
*rotations = (float*)&c->mRigidRotations[0];
if (positions && c->mRigidTranslations.size())
*positions = (float*)&c->mRigidTranslations[0];
}
FlexExtInstance* flexExtCreateInstance(FlexExtContainer* c, const FlexExtAsset* asset, const float* transform, float vx, float vy, float vz, int phase, float invMassScale)
{
const int numParticles = asset->mNumParticles;
// check if asset will fit
if (int(c->mFreeList.size()) < numParticles)
return NULL;
FlexExtInstance* inst = new FlexExtInstance();
inst->mAsset = asset;
inst->mTriangleIndex = -1;
inst->mRigidIndex = -1;
inst->mInflatableIndex = -1;
inst->mUserData = NULL;
inst->mNumParticles = numParticles;
// allocate particles for instance
inst->mParticleIndices = new int[numParticles];
int n = flexExtAllocParticles(c, numParticles, &inst->mParticleIndices[0]);
assert(n == numParticles);
(void)n;
c->mInstances.push_back(inst);
const Matrix44 xform(transform);
for (int i=0; i < numParticles; ++i)
{
const int index = inst->mParticleIndices[i];
// add transformed particles to the container
c->mParticles[index] = xform*Vec4(Vec3(&asset->mParticles[i*4]), 1.0f);
c->mParticles[index].w = asset->mParticles[i*4+3]*invMassScale;
c->mVelocities[index] = Vec3(vx, vy, vz);
c->mPhases[index] = phase;
c->mNormals[index] = Vec4(0.0f);
}
c->mNeedsCompact = true;
c->mNeedsActiveListRebuild = true;
return inst;
}
void flexExtDestroyInstance(FlexExtContainer* c, const FlexExtInstance* inst)
{
flexExtFreeParticles(c, inst->mNumParticles, &inst->mParticleIndices[0]);
delete[] inst->mParticleIndices;
// TODO: O(N) remove
std::vector<FlexExtInstance*>::iterator iter = std::find(c->mInstances.begin(), c->mInstances.end(), inst);
assert(iter != c->mInstances.end());
c->mInstances.erase(iter);
c->mNeedsCompact = true;
c->mNeedsActiveListRebuild = true;
delete inst;
}
void flexExtTickContainer(FlexExtContainer* c, float dt, int substeps, FlexTimers* timers)
{
// update the device
flexExtPushToDevice(c);
// update solver
flexUpdateSolver(c->mSolver, dt, substeps, timers);
// update host
flexExtPullFromDevice(c);
// ensure memory transfers have finished
flexSetFence();
flexWaitFence();
}
void flexExtPushToDevice(FlexExtContainer* c)
{
if (c->mNeedsActiveListRebuild)
{
// update active list
int n = flexExtGetActiveList(c, &c->mActiveList[0]);
flexSetActive(c->mSolver, &c->mActiveList[0], n, eFlexMemoryHostAsync);
c->mNeedsActiveListRebuild = false;
}
	// push any changes to the solver
flexSetParticles(c->mSolver, (float*)&c->mParticles[0], int(c->mParticles.size()), eFlexMemoryHostAsync);
flexSetVelocities(c->mSolver, (float*)&c->mVelocities[0], int(c->mVelocities.size()), eFlexMemoryHostAsync);
flexSetPhases(c->mSolver, &c->mPhases[0], int(c->mPhases.size()), eFlexMemoryHostAsync);
flexSetNormals(c->mSolver, (float*)&c->mNormals[0], int(c->mNormals.size()), eFlexMemoryHostAsync);
if (c->mNeedsCompact)
CompactObjects(c);
}
void flexExtPullFromDevice(FlexExtContainer* c)
{
// read back particle data
flexGetParticles(c->mSolver, (float*)&c->mParticles[0], int(c->mParticles.size()), eFlexMemoryHostAsync);
flexGetVelocities(c->mSolver, (float*)&c->mVelocities[0], int(c->mVelocities.size()), eFlexMemoryHostAsync);
flexGetPhases(c->mSolver, &c->mPhases[0], int(c->mPhases.size()), eFlexMemoryHostAsync);
flexGetNormals(c->mSolver, (float*)&c->mNormals[0], int(c->mNormals.size()), eFlexMemoryHostAsync);
// read back rigid transforms
if (c->mRigidCoefficients.size())
flexGetRigidTransforms(c->mSolver, (float*)&c->mRigidRotations[0], (float*)&c->mRigidTranslations[0], eFlexMemoryHostAsync);
}
namespace
{
struct Key
{
Key(int i, float d) : index(i), depth(d) {}
int index;
float depth;
bool operator < (const Key& rhs) const { return depth < rhs.depth; }
};
}
int flexExtCreateWeldedMeshIndices(const float* vertices, int numVertices, int* uniqueIndices, int* originalToUniqueMap, float threshold)
{
memset(originalToUniqueMap, -1, numVertices*sizeof(int));
const Vec3* positions = (const Vec3*)vertices;
// use a sweep and prune style search to accelerate neighbor finding
std::vector<Key> keys;
for (int i=0; i < numVertices; i++)
keys.push_back(Key(i, positions[i].z));
std::sort(keys.begin(), keys.end());
int uniqueCount = 0;
// sweep keys to find matching verts
for (int i=0; i < numVertices; ++i)
{
// we are a duplicate, skip
if (originalToUniqueMap[keys[i].index] != -1)
continue;
// scan forward until no vertex can be closer than threshold
for (int j=i+1; j < numVertices && (keys[j].depth-keys[i].depth) <= threshold; ++j)
{
float distance = Length(Vector3(positions[keys[i].index])-Vector3(positions[keys[j].index]));
if (distance <= threshold)
originalToUniqueMap[keys[j].index] = uniqueCount;
}
originalToUniqueMap[keys[i].index] = uniqueCount;
uniqueIndices[uniqueCount++] = keys[i].index;
}
return uniqueCount;
}
namespace
{
float SampleSDF(const float* sdf, int dim, int x, int y, int z)
{
assert(x < dim && x >= 0);
assert(y < dim && y >= 0);
assert(z < dim && z >= 0);
return sdf[z*dim*dim + y*dim + x];
}
// return normal of signed distance field
Vec3 SampleSDFGrad(const float* sdf, int dim, int x, int y, int z)
{
int x0 = std::max(x-1, 0);
int x1 = std::min(x+1, dim-1);
int y0 = std::max(y-1, 0);
int y1 = std::min(y+1, dim-1);
int z0 = std::max(z-1, 0);
int z1 = std::min(z+1, dim-1);
float dx = (SampleSDF(sdf, dim, x1, y, z) - SampleSDF(sdf, dim, x0, y, z))*(dim*0.5f);
float dy = (SampleSDF(sdf, dim, x, y1, z) - SampleSDF(sdf, dim, x, y0, z))*(dim*0.5f);
float dz = (SampleSDF(sdf, dim, x, y, z1) - SampleSDF(sdf, dim, x, y, z0))*(dim*0.5f);
return Vec3(dx, dy, dz);
}
} // anonymous namespace
FlexExtAsset* flexExtCreateRigidFromMesh(const float* vertices, int numVertices, const int* indices, int numTriangleIndices, float spacing)
{
std::vector<Vec4> particles;
std::vector<Vec4> normals;
std::vector<int> phases;
const Vec3* positions = (Vec3*)vertices;
Vec3 meshLower(FLT_MAX), meshUpper(-FLT_MAX);
for (int i=0; i < numVertices; ++i)
{
meshLower = Min(meshLower, positions[i]);
meshUpper = Max(meshUpper, positions[i]);
}
Vec3 edges = meshUpper-meshLower;
float maxEdge = std::max(std::max(edges.x, edges.y), edges.z);
	// tweak spacing to avoid edge cases for particles lying on the boundary
// just covers the case where an edge is a whole multiple of the spacing.
float spacingEps = spacing*(1.0f - 1e-4f);
// make sure to have at least one particle in each dimension
int dx, dy, dz;
dx = spacing > edges.x ? 1 : int(edges.x/spacingEps);
dy = spacing > edges.y ? 1 : int(edges.y/spacingEps);
dz = spacing > edges.z ? 1 : int(edges.z/spacingEps);
int maxDim = std::max(std::max(dx, dy), dz);
// expand border by two voxels to ensure adequate sampling at edges
meshLower -= 2.0f*Vec3(spacing);
meshUpper += 2.0f*Vec3(spacing);
maxDim += 4;
// we shift the voxelization bounds so that the voxel centers
// lie symmetrically to the center of the object. this reduces the
// chance of missing features, and also better aligns the particles
// with the mesh
Vec3 meshOffset;
meshOffset.x = 0.5f * (spacing - (edges.x - (dx-1)*spacing));
meshOffset.y = 0.5f * (spacing - (edges.y - (dy-1)*spacing));
meshOffset.z = 0.5f * (spacing - (edges.z - (dz-1)*spacing));
meshLower -= meshOffset;
// don't allow samplings with > 64 per-side
if (maxDim > 64)
return NULL;
std::vector<uint32_t> voxels(maxDim*maxDim*maxDim);
Voxelize(vertices, numVertices, indices, numTriangleIndices, maxDim, maxDim, maxDim, &voxels[0], meshLower, meshLower + Vec3(maxDim*spacing));
std::vector<float> sdf(maxDim*maxDim*maxDim);
MakeSDF(&voxels[0], maxDim, maxDim, maxDim, &sdf[0]);
Vec3 center;
for (int x=0; x < maxDim; ++x)
{
for (int y=0; y < maxDim; ++y)
{
for (int z=0; z < maxDim; ++z)
{
const int index = z*maxDim*maxDim + y*maxDim + x;
// if voxel is marked as occupied the add a particle
if (voxels[index])
{
Vec3 position = meshLower + spacing*Vec3(float(x) + 0.5f, float(y) + 0.5f, float(z) + 0.5f);
// normalize the sdf value and transform to world scale
Vec3 n = SafeNormalize(SampleSDFGrad(&sdf[0], maxDim, x, y, z));
float d = sdf[index]*maxEdge;
normals.push_back(Vec4(n, d));
particles.push_back(Vec4(position.x, position.y, position.z, 1.0f));
phases.push_back(0);
center += position;
}
}
}
}
FlexExtAsset* asset = new FlexExtAsset();
	memset(asset, 0, sizeof(FlexExtAsset));
if (particles.size())
{
// store center of mass
center /= float(particles.size());
asset->mRigidCenter[0] = center.x;
asset->mRigidCenter[1] = center.y;
asset->mRigidCenter[2] = center.z;
asset->mNumParticles = int(particles.size());
asset->mParticles = new float[particles.size()*4];
memcpy(asset->mParticles, &particles[0], sizeof(Vec4)*particles.size());
// todo: normals
}
return asset;
}
FlexExtAsset* flexExtCreateClothFromMesh(const float* particles, int numVertices, const int* indices, int numTriangles, float stretchStiffness, float bendStiffness, float tetherStiffness, float tetherGive, float pressure)
{
FlexExtAsset* asset = new FlexExtAsset();
	memset(asset, 0, sizeof(FlexExtAsset));
	asset->mParticles = new float[numVertices*4];
	memcpy(asset->mParticles, particles, numVertices*4*sizeof(float));
asset->mTriangleIndices = new int[numTriangles*3];
memcpy(asset->mTriangleIndices, indices, numTriangles*3*sizeof(int));
asset->mNumParticles = numVertices;
asset->mNumTriangles = numTriangles;
// create cloth mesh
ClothMesh cloth((Vec4*)particles, numVertices, indices, numTriangles*3, stretchStiffness, bendStiffness, true);
if (cloth.mValid)
{
// create tethers
if (tetherStiffness > 0.0f)
{
std::vector<int> anchors;
anchors.reserve(numVertices);
// find anchors
for (int i=0; i < numVertices; ++i)
{
Vec4& particle = ((Vec4*)particles)[i];
if (particle.w == 0.0f)
anchors.push_back(i);
}
if (anchors.size())
{
// create tethers
for (int i=0; i < numVertices; ++i)
{
Vec4& particle = ((Vec4*)particles)[i];
if (particle.w == 0.0f)
continue;
float minSqrDist = FLT_MAX;
int minIndex = -1;
// find the closest attachment point
for (int a=0; a < int(anchors.size()); ++a)
{
Vec4& attachment = ((Vec4*)particles)[anchors[a]];
float distSqr = LengthSq(Vec3(particle)-Vec3(attachment));
if (distSqr < minSqrDist)
{
minSqrDist = distSqr;
minIndex = anchors[a];
}
}
// add a tether
if (minIndex != -1)
{
cloth.mConstraintIndices.push_back(i);
cloth.mConstraintIndices.push_back(minIndex);
cloth.mConstraintRestLengths.push_back(sqrtf(minSqrDist)*(1.0f + tetherGive));
// negative stiffness indicates tether (unilateral constraint)
cloth.mConstraintCoefficients.push_back(-tetherStiffness);
}
}
}
}
const int numSprings = int(cloth.mConstraintCoefficients.size());
asset->mSpringIndices = new int[numSprings*2];
asset->mSpringCoefficients = new float[numSprings];
asset->mSpringRestLengths = new float[numSprings];
asset->mNumSprings = numSprings;
for (int i=0; i < numSprings; ++i)
{
asset->mSpringIndices[i*2+0] = cloth.mConstraintIndices[i*2+0];
asset->mSpringIndices[i*2+1] = cloth.mConstraintIndices[i*2+1];
asset->mSpringRestLengths[i] = cloth.mConstraintRestLengths[i];
asset->mSpringCoefficients[i] = cloth.mConstraintCoefficients[i];
}
if (pressure > 0.0f)
{
asset->mInflatable = true;
asset->mInflatableVolume = cloth.mRestVolume;
asset->mInflatableStiffness = cloth.mConstraintScale;
asset->mInflatablePressure = pressure;
}
}
else
{
flexExtDestroyAsset(asset);
return NULL;
}
return asset;
}
void flexExtDestroyAsset(FlexExtAsset* asset)
{
delete[] asset->mParticles;
delete[] asset->mSpringIndices;
delete[] asset->mSpringCoefficients;
delete[] asset->mSpringRestLengths;
delete[] asset->mTriangleIndices;
delete asset;
}
void flexExtSetForceFields(FlexExtContainer* c, const FlexExtForceField* forceFields, int numForceFields, FlexMemory source)
{
// re-alloc if necessary
if (numForceFields > c->mMaxForceFields)
{
cudaCheck(cudaFree(c->mForceFieldsGpu));
cudaCheck(cudaMalloc(&c->mForceFieldsGpu, sizeof(FlexExtForceField)*numForceFields));
c->mMaxForceFields = numForceFields;
}
c->mNumForceFields = numForceFields;
if (numForceFields > 0)
{
WriteDeviceData(c->mForceFieldsGpu, forceFields, numForceFields*sizeof(FlexExtForceField), source);
}
}
__global__ void UpdateForceFields(int numParticles, const int* __restrict__ activeIndices, const Vec4* __restrict__ positions, Vec3* __restrict__ velocities, const FlexExtForceField* __restrict__ forceFields, int numForceFields, float dt)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
for (int f = 0; f < numForceFields; f++)
{
const FlexExtForceField& forceField = forceFields[f];
if (i < numParticles)
{
const int index = activeIndices[i];
Vec4 p = positions[index];
Vec3 v = velocities[index];
Vec3 localPos = Vec3(p.x, p.y, p.z) - Vec3(forceField.mPosition[0], forceField.mPosition[1], forceField.mPosition[2]);
float length = Length(localPos);
if (length > forceField.mRadius)
{
continue;
}
Vec3 fieldDir;
if (length > 0.0f)
{
fieldDir = localPos / length;
}
else
{
fieldDir = localPos;
}
// If using linear falloff, scale with distance.
float fieldStrength = forceField.mStrength;
if (forceField.mLinearFalloff)
{
fieldStrength *= (1.0f - (length / forceField.mRadius));
}
			// Convert the field strength into a velocity change according to the field mode
			float unitMultiplier = 0.0f;
if (forceField.mMode == eFlexExtModeForce)
{
unitMultiplier = dt * p.w; // time/mass
}
else if (forceField.mMode == eFlexExtModeImpulse)
{
unitMultiplier = p.w; // 1/mass
}
else if (forceField.mMode == eFlexExtModeVelocityChange)
{
unitMultiplier = 1.0f;
}
Vec3 deltaVelocity = fieldDir * fieldStrength * unitMultiplier;
velocities[index] = v + deltaVelocity;
}
}
}
void flexExtApplyForceFields(FlexExtContainer* c, float dt)
{
int numParticles = flexGetActiveCount(c->mSolver);
if (numParticles && c->mNumForceFields)
{
// reallocate temp buffers if necessary
if (int(c->mParticles.size()) > c->mMaxTmpParticles)
{
c->mMaxTmpParticles = int(c->mParticles.size());
cudaCheck(cudaFree(c->mTmpActiveIndicesGpu));
cudaCheck(cudaFree(c->mTmpParticlesGpu));
cudaCheck(cudaFree(c->mTmpVelocitiesGpu));
cudaCheck(cudaMalloc(&c->mTmpActiveIndicesGpu, sizeof(int)*c->mMaxTmpParticles));
cudaCheck(cudaMalloc(&c->mTmpParticlesGpu, sizeof(Vec4)*c->mMaxTmpParticles));
cudaCheck(cudaMalloc(&c->mTmpVelocitiesGpu, sizeof(Vec3)*c->mMaxTmpParticles));
}
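// pull the active indices, particle positions and velocities from the solver
// into the temporary device buffers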
flexGetActive(c->mSolver, c->mTmpActiveIndicesGpu, eFlexMemoryDeviceAsync);
flexGetParticles(c->mSolver, (float*)c->mTmpParticlesGpu, int(c->mParticles.size()), eFlexMemoryDeviceAsync);
flexGetVelocities(c->mSolver, (float*)c->mTmpVelocitiesGpu, int(c->mParticles.size()), eFlexMemoryDeviceAsync);
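// launch one thread per active particle; integer ceil-division so the last
// partial block is still covered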
const int kNumBlocks = (numParticles+kNumThreadsPerBlock-1)/kNumThreadsPerBlock;
UpdateForceFields<<<kNumBlocks, kNumThreadsPerBlock>>>(
numParticles,
c->mTmpActiveIndicesGpu,
c->mTmpParticlesGpu,
c->mTmpVelocitiesGpu,
c->mForceFieldsGpu,
c->mNumForceFields,
dt
);
flexSetVelocities(c->mSolver, (float*)c->mTmpVelocitiesGpu, int(c->mParticles.size()), eFlexMemoryDeviceAsync);
}
}
|
3e53652b2500b168ab06b439fb9bff342808cbad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// rasterizer.c
// rasterizer
//
// Created by Robert Crosby on 4/15/12.
// Copyright (c) 2012 __MyCompanyName__. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "rasterizer.h"
/*
check_barycentric
----------------------------------------------------------------------------
Checks whether the barycentric coordinates lie inside the triangle.
Takes a vec3_t with barycentric coordinates.
Returns 1 if none of the coordinates are negative or 0 otherwise.
*/
static int check_barycentric(vec3_t *coords) {
return coords->x >= 0 && coords->y >= 0 && coords->z >= 0;
}
/*
get_barycentric
----------------------------------------------------------------------------
Finds the barycentric coordinates of a point on a polygon.
Takes a polygon_t and x and y integers for the location on the polygon.
Returns a vec3_t with the three barycentric coordinates.
*/
static vec3_t get_barycentric(const polygon_t *poly, int x, int y) {
vec3_t coords;
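// Cramer's rule: each coordinate is the signed area of the sub-triangle
// opposite a vertex divided by the precomputed determinant; all three are
// non-negative when (x, y) lies inside the triangle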
coords.x = x * poly->loc2.y;
coords.x -= x * poly->loc3.y;
coords.x -= poly->loc2.x * y;
coords.x += poly->loc2.x * poly->loc3.y;
coords.x += poly->loc3.x * y;
coords.x -= poly->loc3.x * poly->loc2.y;
coords.x /= poly->det;
coords.y = poly->loc1.x * y;
coords.y -= poly->loc1.x * poly->loc3.y;
coords.y -= x * poly->loc1.y;
coords.y += x * poly->loc3.y;
coords.y += poly->loc3.x * poly->loc1.y;
coords.y -= poly->loc3.x * y;
coords.y /= poly->det;
coords.z = poly->loc1.x * poly->loc2.y;
coords.z -= poly->loc1.x * y;
coords.z -= poly->loc2.x * poly->loc1.y;
coords.z += poly->loc2.x * y;
coords.z += x * poly->loc1.y;
coords.z -= x * poly->loc2.y;
coords.z /= poly->det;
return coords;
}
/*
polygon_set_det
----------------------------------------------------------------------------
Sets the determinant for the polygon for later use with get_barycentric.
Takes a polygon_t
*/
static void polygon_set_det(polygon_t *polygon) {
float det = polygon->loc1.x * polygon->loc2.y;
det -= polygon->loc1.x * polygon->loc3.y;
det -= polygon->loc2.x * polygon->loc1.y;
det += polygon->loc2.x * polygon->loc3.y;
det += polygon->loc3.x * polygon->loc1.y;
det -= polygon->loc3.x * polygon->loc2.y;
polygon->det = det;
}
/*
polygon_draw_pixel
----------------------------------------------------------------------------
Draws to a given pixel with the data of a given polygon.
Takes a color_t and float pointer for the pixel, a polygon_t, and an x and y for the pixel location.
*/
static void polygon_draw_pixel(color_t *pixel, float *zbuffer, polygon_t *polygon, int x, int y) {
vec3_t bary;
vec3_t colorf;
float zvalue;
// get and check the bary centric coordinates
bary = get_barycentric(polygon, x, y);
if (!check_barycentric(&bary))
return;
// check the zbuffer
zvalue = bary.x * polygon->zValues.x + bary.y * polygon->zValues.y + bary.z * polygon->zValues.z;
if (zvalue < *zbuffer)
return;
*zbuffer = zvalue;
colorf.x = bary.x * polygon->color1.x + bary.y * polygon->color2.x + bary.z * polygon->color3.x;
colorf.y = bary.x * polygon->color1.y + bary.y * polygon->color2.y + bary.z * polygon->color3.y;
colorf.z = bary.x * polygon->color1.z + bary.y * polygon->color2.z + bary.z * polygon->color3.z;
// draw the pixel
*pixel = vec3_to_color(&colorf);
}
/*
polygon_draw
----------------------------------------------------------------------------
Iterates over the area defined in the polygon calling polygon_draw_pixel.
Takes a drawbuffer_t to draw to and a polygon_t.
*/
static void polygon_draw(drawbuffer_t *drawbuffer, polygon_t *polygon) {
for (int i = polygon->low.x; i < polygon->high.x; ++i) {
for (int j = polygon->low.y; j < polygon->high.y; ++j) {
color_t *pixel = drawbuffer_get_color_at(drawbuffer, i, j);
float *zvalue = drawbuffer_get_zvalue_at(drawbuffer, i, j);
polygon_draw_pixel(pixel, zvalue, polygon, i, j);
}
}
}
/*
polygon_create
----------------------------------------------------------------------------
Creates a polygon_t from a given triangle_t.
Takes a triangle_t pointer, a vertex_t pointer to an array, and width and height of the drawable area.
Returns a pointer to a newly allocated polygon_t; the caller is responsible for freeing it.
*/
static polygon_t* polygon_create(ivec3_t *triangle, vertex_t *vertices, int width, int height, mat4_t *mtx) {
polygon_t *polygon;
vertex_t *v1, *v2, *v3;
vec3_t loc1, loc2, loc3;
// get the vertices of the triangle.
v1 = vertices + triangle->x;
v2 = vertices + triangle->y;
v3 = vertices + triangle->z;
// translate the locations
loc1 = mat4_translate_point(mtx, &v1->location);
loc2 = mat4_translate_point(mtx, &v2->location);
loc3 = mat4_translate_point(mtx, &v3->location);
// create the polygon and add the coordinates from the triangle vertices.
polygon = (polygon_t *)malloc(sizeof(polygon_t));
polygon->loc1 = vec3_to_ivec2(&loc1);
polygon->loc2 = vec3_to_ivec2(&loc2);
polygon->loc3 = vec3_to_ivec2(&loc3);
// find the high screen bounds.
polygon->high.x = polygon->loc1.x > polygon->loc2.x ? polygon->loc1.x : polygon->loc2.x;
polygon->high.x = polygon->high.x > polygon->loc3.x ? polygon->high.x : polygon->loc3.x;
polygon->high.x = polygon->high.x >= width ? width - 1 : polygon->high.x;
polygon->high.y = polygon->loc1.y > polygon->loc2.y ? polygon->loc1.y : polygon->loc2.y;
polygon->high.y = polygon->high.y > polygon->loc3.y ? polygon->high.y : polygon->loc3.y;
polygon->high.y = polygon->high.y >= height ? height - 1 : polygon->high.y;
// find the low screen bounds.
polygon->low.x = polygon->loc1.x < polygon->loc2.x ? polygon->loc1.x : polygon->loc2.x;
polygon->low.x = polygon->low.x < polygon->loc3.x ? polygon->low.x : polygon->loc3.x;
polygon->low.x = polygon->low.x < 0 ? 0 : polygon->low.x;
polygon->low.y = polygon->loc1.y < polygon->loc2.y ? polygon->loc1.y : polygon->loc2.y;
polygon->low.y = polygon->low.y < polygon->loc3.y ? polygon->low.y : polygon->loc3.y;
polygon->low.y = polygon->low.y < 0 ? 0 : polygon->low.y;
// get the color and z values from the triangle.
polygon->zValues.x = loc1.z;
polygon->color1 = v1->color;
polygon->zValues.y = loc2.z;
polygon->color2 = v2->color;
polygon->zValues.z = loc3.z;
polygon->color3 = v3->color;
polygon_set_det(polygon);
return polygon;
}
/*
rasterize_mesh
----------------------------------------------------------------------------
Rasterizes a mesh into the draw buffers on the CPU, drawing the mesh
`duplicates` times laid out in a square grid.
Takes a drawbuffer_t to draw to, a mesh_t, and the number of duplicates.
*/
void rasterize_mesh(drawbuffer_t *buffers, mesh_t *mesh, int duplicates) {
mat4_t mtxScale, mtx;
float scale;
int subDivs = ceil(sqrtf(duplicates));
scale = buffers->width < buffers->height ? buffers->width : buffers->height;
scale /= subDivs;
mtxScale = mat4_scaled(scale, scale, 1.0f);
for (int i = 0; i < duplicates; ++i) {
mtx = mtxScale;
mat4_translate3f(&mtx, scale * (i/subDivs), scale * (i%subDivs), 0.0f);
for (int j = 0; j < mesh->triangleCount; ++j) {
ivec3_t *triangle = mesh->triangles + j;
polygon_t *polygon = polygon_create(triangle, mesh->vertices, buffers->width, buffers->height, &mtx);
// draw the polygons.
if (polygon != NULL) {
polygon_draw(buffers, polygon);
}
free(polygon);
}
}
}
/*
----------------------------------------------------------------------------
CUDA Code
----------------------------------------------------------------------------
*/
__device__ void cuda_mat4_translate_point_r(mat4_t *m, vec3_t *pt) {
vec3_t newpt;
float w;
newpt.x = pt->x * m->x.x + pt->y * m->y.x + pt->z * m->z.x + m->w.x;
newpt.y = pt->x * m->x.y + pt->y * m->y.y + pt->z * m->z.y + m->w.y;
newpt.z = pt->x * m->x.z + pt->y * m->y.z + pt->z * m->z.z + m->w.z;
w = pt->x * m->x.w + pt->y * m->y.w + pt->z * m->z.w + m->w.w;
pt->x = newpt.x / w;
pt->y = newpt.y / w;
pt->z = newpt.z / w;
}
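// Kernel: one thread per triangle. Each thread transforms its three vertices
// into screen space, computes the clamped bounding box, copies colors and
// depths, and precomputes the barycentric determinant into a polygon_t.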
__global__ void cuda_create_polygons(polygon_t *polygons, vertex_t *vertices, ivec3_t *triangles, int polyCount, int width, int height, mat4_t *mtx) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
vertex_t *v1, *v2, *v3;
ivec3_t *tri;
vec3_t loc1, loc2, loc3;
polygon_t polygon, *polyRef;
if (i < polyCount) {
tri = triangles + i;
v1 = vertices + tri->x;
v2 = vertices + tri->y;
v3 = vertices + tri->z;
polyRef = polygons + i;
loc1 = v1->location;
loc2 = v2->location;
loc3 = v3->location;
cuda_mat4_translate_point_r(mtx, &loc1);
cuda_mat4_translate_point_r(mtx, &loc2);
cuda_mat4_translate_point_r(mtx, &loc3);
// locations
polygon.loc1.x = loc1.x;
polygon.loc1.y = loc1.y;
polygon.loc2.x = loc2.x;
polygon.loc2.y = loc2.y;
polygon.loc3.x = loc3.x;
polygon.loc3.y = loc3.y;
// find the high screen bounds.
polygon.high.x = polygon.loc1.x > polygon.loc2.x ? polygon.loc1.x : polygon.loc2.x;
polygon.high.x = polygon.high.x > polygon.loc3.x ? polygon.high.x : polygon.loc3.x;
polygon.high.x = polygon.high.x >= width ? width - 1 : polygon.high.x;
polygon.high.y = polygon.loc1.y > polygon.loc2.y ? polygon.loc1.y : polygon.loc2.y;
polygon.high.y = polygon.high.y > polygon.loc3.y ? polygon.high.y : polygon.loc3.y;
polygon.high.y = polygon.high.y >= height ? height - 1 : polygon.high.y;
// find the low screen bounds.
polygon.low.x = polygon.loc1.x < polygon.loc2.x ? polygon.loc1.x : polygon.loc2.x;
polygon.low.x = polygon.low.x < polygon.loc3.x ? polygon.low.x : polygon.loc3.x;
polygon.low.x = polygon.low.x < 0 ? 0 : polygon.low.x;
polygon.low.y = polygon.loc1.y < polygon.loc2.y ? polygon.loc1.y : polygon.loc2.y;
polygon.low.y = polygon.low.y < polygon.loc3.y ? polygon.low.y : polygon.loc3.y;
polygon.low.y = polygon.low.y < 0 ? 0 : polygon.low.y;
// get the z values
polygon.zValues.x = loc1.z;
polygon.zValues.y = loc2.z;
polygon.zValues.z = loc3.z;
// get the colors
polygon.color1 = v1->color;
polygon.color2 = v2->color;
polygon.color3 = v3->color;
// pre-calculate the determinant used as the denominator of the barycentric coordinates.
polygon.det = polygon.loc1.x * polygon.loc2.y;
polygon.det -= polygon.loc1.x * polygon.loc3.y;
polygon.det -= polygon.loc2.x * polygon.loc1.y;
polygon.det += polygon.loc2.x * polygon.loc3.y;
polygon.det += polygon.loc3.x * polygon.loc1.y;
polygon.det -= polygon.loc3.x * polygon.loc2.y;
// save to global memory
*polyRef = polygon;
}
}
__global__ void cuda_clear_buffers(color_t *colorBuffer, float *zBuffer, int *locks, int numPixels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
color_t *color, clearColor = {0, 0, 0};
float *zVal, zClear = FLT_MIN;
int *lock;
if (i < numPixels) {
color = colorBuffer + i;
*color = clearColor;
zVal = zBuffer + i;
*zVal = zClear;
lock = locks + i;
*lock = 0;
}
}
__device__ vec3_t cuda_get_barycentric(polygon_t *poly, int x, int y) {
vec3_t coords;
coords.x = x * poly->loc2.y;
coords.x -= x * poly->loc3.y;
coords.x -= poly->loc2.x * y;
coords.x += poly->loc2.x * poly->loc3.y;
coords.x += poly->loc3.x * y;
coords.x -= poly->loc3.x * poly->loc2.y;
coords.x /= poly->det;
coords.y = poly->loc1.x * y;
coords.y -= poly->loc1.x * poly->loc3.y;
coords.y -= x * poly->loc1.y;
coords.y += x * poly->loc3.y;
coords.y += poly->loc3.x * poly->loc1.y;
coords.y -= poly->loc3.x * y;
coords.y /= poly->det;
coords.z = poly->loc1.x * poly->loc2.y;
coords.z -= poly->loc1.x * y;
coords.z -= poly->loc2.x * poly->loc1.y;
coords.z += poly->loc2.x * y;
coords.z += x * poly->loc1.y;
coords.z -= x * poly->loc2.y;
coords.z /= poly->det;
return coords;
}
__device__ void cuda_attomic_set_color(color_t *pixel, color_t *newColor, float *zBuffVal, float new_zVal, int *lock) {
// acquire the per-pixel spin lock: swap in 1 until the previous value is 0 (free);
// after the loop lockVal holds 0, which the closing atomicExch writes back to release
int lockVal = 1;
while (lockVal = atomicExch(lock, lockVal));
if (*zBuffVal < new_zVal) {
*zBuffVal = new_zVal;
pixel->red = newColor->red;
pixel->green = newColor->green;
pixel->blue = newColor->blue;
}
atomicExch(lock, lockVal);
}
__device__ void cuda_draw_polygon_pixel(color_t *pixel, float *zBuffVal, int *lock, polygon_t *polygon, int x, int y) {
vec3_t bary;
float zVal;
color_t newColor;
bary = cuda_get_barycentric(polygon, x, y);
if (bary.x >= 0 && bary.y >= 0 && bary.z >= 0) {
zVal = bary.x * polygon->zValues.x + bary.y * polygon->zValues.y + bary.z * polygon->zValues.z;
newColor.red = (bary.x * polygon->color1.x + bary.y * polygon->color2.x + bary.z * polygon->color3.x) * 255;
newColor.green = (bary.x * polygon->color1.y + bary.y * polygon->color2.y + bary.z * polygon->color3.y) * 255;
newColor.blue = (bary.x * polygon->color1.z + bary.y * polygon->color2.z + bary.z * polygon->color3.z) * 255;
cuda_attomic_set_color(pixel, &newColor, zBuffVal, zVal, lock);
}
}
__device__ color_t* cuda_get_color_at(color_t *colorBuffer, int width, int x, int y) {
return colorBuffer + width*y + x;
}
__device__ float* cuda_get_zvalue_at(float *zBuffer, int width, int x, int y) {
return zBuffer + width*y + x;
}
__device__ int* cuda_get_lock_at(int *locks, int width, int x, int y) {
return locks + width*y + x;
}
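// Kernel: one thread per polygon. Each thread scans its polygon's screen-space
// bounding box and shades the covered pixels, using the per-pixel lock to
// serialize the depth test and color write.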
__global__ void cuda_draw_polygons(polygon_t *polygons, int polyCount, color_t *colorBuffer, float *zBuffer, int *locks, int width, int height) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j, k;
polygon_t *polygon;
color_t *pixel;
float *zVal;
int *lock;
if (i < polyCount) {
polygon = polygons + i;
for (j = polygon->low.x; j < polygon->high.x; ++j) {
for (k = polygon->low.y; k < polygon->high.y; ++k) {
pixel = cuda_get_color_at(colorBuffer, width, j, k);
zVal = cuda_get_zvalue_at(zBuffer, width, j, k);
lock = cuda_get_lock_at(locks, width, j, k);
cuda_draw_polygon_pixel(pixel, zVal, lock, polygon, j, k);
}
}
}
}
void create_polygons_cuda(mesh_t *mesh, int width, int height, mat4_t *mtx) {
int block_size = 16;
int num_blocks = mesh->triangleCount / block_size + (mesh->triangleCount % block_size == 0 ? 0 : 1);
mat4_t *d_mtx;
hipMalloc((void **) &d_mtx, sizeof(mat4_t));
hipMemcpy(d_mtx, mtx, sizeof(mat4_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_create_polygons) , dim3(num_blocks), dim3(block_size) , 0, 0, mesh->d_polygons, mesh->d_vertices, mesh->d_triangles, mesh->polygonCount, width, height, d_mtx);
hipFree(d_mtx);
}
void clear_buffers_cuda(drawbuffer_t *buffers) {
int block_size = 64;
int num_blocks = (buffers->width * buffers->height) / block_size + ((buffers->width * buffers->height) % block_size == 0 ? 0 : 1);
hipLaunchKernelGGL(( cuda_clear_buffers) , dim3(num_blocks), dim3(block_size) , 0, 0, buffers->d_colorBuffer, buffers->d_zBuffer, buffers->d_locks, buffers->width * buffers->height);
}
void rasterize_mesh_cuda(drawbuffer_t *buffers, mesh_t *mesh, int duplicates) {
mat4_t mtxScale, mtx;
float scale;
int subDivs = ceil(sqrtf(duplicates));
int block_size = 16;
int num_blocks = mesh->polygonCount / block_size + (mesh->polygonCount % block_size == 0 ? 0 : 1);
scale = buffers->width < buffers->height ? buffers->width : buffers->height;
scale /= subDivs;
mtxScale = mat4_scaled(scale, scale, 1.0f);
for (int i = 0; i < duplicates; ++i) {
mtx = mtxScale;
mat4_translate3f(&mtx, scale * (i/subDivs), scale * (i%subDivs), 0.0f);
create_polygons_cuda(mesh, buffers->width, buffers->height, &mtx);
hipLaunchKernelGGL(( cuda_draw_polygons) , dim3(num_blocks), dim3(block_size) , 0, 0, mesh->d_polygons, mesh->polygonCount, buffers->d_colorBuffer,
buffers->d_zBuffer, buffers->d_locks, buffers->width, buffers->height);
}
}
| 3e53652b2500b168ab06b439fb9bff342808cbad.cu | //
// rasterizer.c
// rasterizer
//
// Created by Robert Crosby on 4/15/12.
// Copyright (c) 2012 __MyCompanyName__. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "rasterizer.h"
/*
check_barycentric
----------------------------------------------------------------------------
Checks whether the barycentric coordinates lie inside the triangle.
Takes a vec3_t with barycentric coordinates.
Returns 1 if none of the coordinates are negative or 0 otherwise.
*/
static int check_barycentric(vec3_t *coords) {
return coords->x >= 0 && coords->y >= 0 && coords->z >= 0;
}
/*
get_barycentric
----------------------------------------------------------------------------
Finds the barycentric coordinates of a point on a polygon.
Takes a polygon_t and x and y integers for the location on the polygon.
Returns a vec3_t with the three barycentric coordinates.
*/
static vec3_t get_barycentric(const polygon_t *poly, int x, int y) {
vec3_t coords;
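// Cramer's rule: each coordinate is the signed area of the sub-triangle
// opposite a vertex divided by the precomputed determinant; all three are
// non-negative when (x, y) lies inside the triangle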
coords.x = x * poly->loc2.y;
coords.x -= x * poly->loc3.y;
coords.x -= poly->loc2.x * y;
coords.x += poly->loc2.x * poly->loc3.y;
coords.x += poly->loc3.x * y;
coords.x -= poly->loc3.x * poly->loc2.y;
coords.x /= poly->det;
coords.y = poly->loc1.x * y;
coords.y -= poly->loc1.x * poly->loc3.y;
coords.y -= x * poly->loc1.y;
coords.y += x * poly->loc3.y;
coords.y += poly->loc3.x * poly->loc1.y;
coords.y -= poly->loc3.x * y;
coords.y /= poly->det;
coords.z = poly->loc1.x * poly->loc2.y;
coords.z -= poly->loc1.x * y;
coords.z -= poly->loc2.x * poly->loc1.y;
coords.z += poly->loc2.x * y;
coords.z += x * poly->loc1.y;
coords.z -= x * poly->loc2.y;
coords.z /= poly->det;
return coords;
}
/*
polygon_set_det
----------------------------------------------------------------------------
Sets the determinant for the polygon for later use with get_barycentric.
Takes a polygon_t
*/
static void polygon_set_det(polygon_t *polygon) {
float det = polygon->loc1.x * polygon->loc2.y;
det -= polygon->loc1.x * polygon->loc3.y;
det -= polygon->loc2.x * polygon->loc1.y;
det += polygon->loc2.x * polygon->loc3.y;
det += polygon->loc3.x * polygon->loc1.y;
det -= polygon->loc3.x * polygon->loc2.y;
polygon->det = det;
}
/*
polygon_draw_pixel
----------------------------------------------------------------------------
Draws to a given pixel with the data of a given polygon.
Takes a color_t and float pointer for the pixel, a polygon_t, and an x and y for the pixel location.
*/
static void polygon_draw_pixel(color_t *pixel, float *zbuffer, polygon_t *polygon, int x, int y) {
vec3_t bary;
vec3_t colorf;
float zvalue;
// get and check the bary centric coordinates
bary = get_barycentric(polygon, x, y);
if (!check_barycentric(&bary))
return;
// check the zbuffer
zvalue = bary.x * polygon->zValues.x + bary.y * polygon->zValues.y + bary.z * polygon->zValues.z;
if (zvalue < *zbuffer)
return;
*zbuffer = zvalue;
colorf.x = bary.x * polygon->color1.x + bary.y * polygon->color2.x + bary.z * polygon->color3.x;
colorf.y = bary.x * polygon->color1.y + bary.y * polygon->color2.y + bary.z * polygon->color3.y;
colorf.z = bary.x * polygon->color1.z + bary.y * polygon->color2.z + bary.z * polygon->color3.z;
// draw the pixel
*pixel = vec3_to_color(&colorf);
}
/*
polygon_draw
----------------------------------------------------------------------------
Iterates over the area defined in the polygon calling polygon_draw_pixel.
Takes a drawbuffer_t to draw to and a polygon_t.
*/
static void polygon_draw(drawbuffer_t *drawbuffer, polygon_t *polygon) {
for (int i = polygon->low.x; i < polygon->high.x; ++i) {
for (int j = polygon->low.y; j < polygon->high.y; ++j) {
color_t *pixel = drawbuffer_get_color_at(drawbuffer, i, j);
float *zvalue = drawbuffer_get_zvalue_at(drawbuffer, i, j);
polygon_draw_pixel(pixel, zvalue, polygon, i, j);
}
}
}
/*
polygon_create
----------------------------------------------------------------------------
Creates a polygon_t from a given triangle_t.
Takes a triangle_t pointer, a vertex_t pointer to an array, and width and height of the drawable area.
Returns a pointer to a newly allocated polygon_t; the caller is responsible for freeing it.
*/
static polygon_t* polygon_create(ivec3_t *triangle, vertex_t *vertices, int width, int height, mat4_t *mtx) {
polygon_t *polygon;
vertex_t *v1, *v2, *v3;
vec3_t loc1, loc2, loc3;
// get the vertices of the triangle.
v1 = vertices + triangle->x;
v2 = vertices + triangle->y;
v3 = vertices + triangle->z;
// translate the locations
loc1 = mat4_translate_point(mtx, &v1->location);
loc2 = mat4_translate_point(mtx, &v2->location);
loc3 = mat4_translate_point(mtx, &v3->location);
// create the polygon and add the coordinates from the triangle vertices.
polygon = (polygon_t *)malloc(sizeof(polygon_t));
polygon->loc1 = vec3_to_ivec2(&loc1);
polygon->loc2 = vec3_to_ivec2(&loc2);
polygon->loc3 = vec3_to_ivec2(&loc3);
// find the high screen bounds.
polygon->high.x = polygon->loc1.x > polygon->loc2.x ? polygon->loc1.x : polygon->loc2.x;
polygon->high.x = polygon->high.x > polygon->loc3.x ? polygon->high.x : polygon->loc3.x;
polygon->high.x = polygon->high.x >= width ? width - 1 : polygon->high.x;
polygon->high.y = polygon->loc1.y > polygon->loc2.y ? polygon->loc1.y : polygon->loc2.y;
polygon->high.y = polygon->high.y > polygon->loc3.y ? polygon->high.y : polygon->loc3.y;
polygon->high.y = polygon->high.y >= height ? height - 1 : polygon->high.y;
// find the low screen bounds.
polygon->low.x = polygon->loc1.x < polygon->loc2.x ? polygon->loc1.x : polygon->loc2.x;
polygon->low.x = polygon->low.x < polygon->loc3.x ? polygon->low.x : polygon->loc3.x;
polygon->low.x = polygon->low.x < 0 ? 0 : polygon->low.x;
polygon->low.y = polygon->loc1.y < polygon->loc2.y ? polygon->loc1.y : polygon->loc2.y;
polygon->low.y = polygon->low.y < polygon->loc3.y ? polygon->low.y : polygon->loc3.y;
polygon->low.y = polygon->low.y < 0 ? 0 : polygon->low.y;
// get the color and z values from the triangle.
polygon->zValues.x = loc1.z;
polygon->color1 = v1->color;
polygon->zValues.y = loc2.z;
polygon->color2 = v2->color;
polygon->zValues.z = loc3.z;
polygon->color3 = v3->color;
polygon_set_det(polygon);
return polygon;
}
/*
rasterize_mesh
----------------------------------------------------------------------------
Rasterizes a mesh into the draw buffers on the CPU, drawing the mesh
`duplicates` times laid out in a square grid.
Takes a drawbuffer_t to draw to, a mesh_t, and the number of duplicates.
*/
void rasterize_mesh(drawbuffer_t *buffers, mesh_t *mesh, int duplicates) {
mat4_t mtxScale, mtx;
float scale;
int subDivs = ceil(sqrtf(duplicates));
scale = buffers->width < buffers->height ? buffers->width : buffers->height;
scale /= subDivs;
mtxScale = mat4_scaled(scale, scale, 1.0f);
for (int i = 0; i < duplicates; ++i) {
mtx = mtxScale;
mat4_translate3f(&mtx, scale * (i/subDivs), scale * (i%subDivs), 0.0f);
for (int j = 0; j < mesh->triangleCount; ++j) {
ivec3_t *triangle = mesh->triangles + j;
polygon_t *polygon = polygon_create(triangle, mesh->vertices, buffers->width, buffers->height, &mtx);
// draw the polygons.
if (polygon != NULL) {
polygon_draw(buffers, polygon);
}
free(polygon);
}
}
}
/*
----------------------------------------------------------------------------
CUDA Code
----------------------------------------------------------------------------
*/
__device__ void cuda_mat4_translate_point_r(mat4_t *m, vec3_t *pt) {
vec3_t newpt;
float w;
newpt.x = pt->x * m->x.x + pt->y * m->y.x + pt->z * m->z.x + m->w.x;
newpt.y = pt->x * m->x.y + pt->y * m->y.y + pt->z * m->z.y + m->w.y;
newpt.z = pt->x * m->x.z + pt->y * m->y.z + pt->z * m->z.z + m->w.z;
w = pt->x * m->x.w + pt->y * m->y.w + pt->z * m->z.w + m->w.w;
pt->x = newpt.x / w;
pt->y = newpt.y / w;
pt->z = newpt.z / w;
}
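// Kernel: one thread per triangle. Each thread transforms its three vertices
// into screen space, computes the clamped bounding box, copies colors and
// depths, and precomputes the barycentric determinant into a polygon_t.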
__global__ void cuda_create_polygons(polygon_t *polygons, vertex_t *vertices, ivec3_t *triangles, int polyCount, int width, int height, mat4_t *mtx) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
vertex_t *v1, *v2, *v3;
ivec3_t *tri;
vec3_t loc1, loc2, loc3;
polygon_t polygon, *polyRef;
if (i < polyCount) {
tri = triangles + i;
v1 = vertices + tri->x;
v2 = vertices + tri->y;
v3 = vertices + tri->z;
polyRef = polygons + i;
loc1 = v1->location;
loc2 = v2->location;
loc3 = v3->location;
cuda_mat4_translate_point_r(mtx, &loc1);
cuda_mat4_translate_point_r(mtx, &loc2);
cuda_mat4_translate_point_r(mtx, &loc3);
// locations
polygon.loc1.x = loc1.x;
polygon.loc1.y = loc1.y;
polygon.loc2.x = loc2.x;
polygon.loc2.y = loc2.y;
polygon.loc3.x = loc3.x;
polygon.loc3.y = loc3.y;
// find the high screen bounds.
polygon.high.x = polygon.loc1.x > polygon.loc2.x ? polygon.loc1.x : polygon.loc2.x;
polygon.high.x = polygon.high.x > polygon.loc3.x ? polygon.high.x : polygon.loc3.x;
polygon.high.x = polygon.high.x >= width ? width - 1 : polygon.high.x;
polygon.high.y = polygon.loc1.y > polygon.loc2.y ? polygon.loc1.y : polygon.loc2.y;
polygon.high.y = polygon.high.y > polygon.loc3.y ? polygon.high.y : polygon.loc3.y;
polygon.high.y = polygon.high.y >= height ? height - 1 : polygon.high.y;
// find the low screen bounds.
polygon.low.x = polygon.loc1.x < polygon.loc2.x ? polygon.loc1.x : polygon.loc2.x;
polygon.low.x = polygon.low.x < polygon.loc3.x ? polygon.low.x : polygon.loc3.x;
polygon.low.x = polygon.low.x < 0 ? 0 : polygon.low.x;
polygon.low.y = polygon.loc1.y < polygon.loc2.y ? polygon.loc1.y : polygon.loc2.y;
polygon.low.y = polygon.low.y < polygon.loc3.y ? polygon.low.y : polygon.loc3.y;
polygon.low.y = polygon.low.y < 0 ? 0 : polygon.low.y;
// get the z values
polygon.zValues.x = loc1.z;
polygon.zValues.y = loc2.z;
polygon.zValues.z = loc3.z;
// get the colors
polygon.color1 = v1->color;
polygon.color2 = v2->color;
polygon.color3 = v3->color;
// pre-calculate the determinant used as the denominator of the barycentric coordinates.
polygon.det = polygon.loc1.x * polygon.loc2.y;
polygon.det -= polygon.loc1.x * polygon.loc3.y;
polygon.det -= polygon.loc2.x * polygon.loc1.y;
polygon.det += polygon.loc2.x * polygon.loc3.y;
polygon.det += polygon.loc3.x * polygon.loc1.y;
polygon.det -= polygon.loc3.x * polygon.loc2.y;
// save to global memory
*polyRef = polygon;
}
}
__global__ void cuda_clear_buffers(color_t *colorBuffer, float *zBuffer, int *locks, int numPixels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
color_t *color, clearColor = {0, 0, 0};
float *zVal, zClear = FLT_MIN;
int *lock;
if (i < numPixels) {
color = colorBuffer + i;
*color = clearColor;
zVal = zBuffer + i;
*zVal = zClear;
lock = locks + i;
*lock = 0;
}
}
__device__ vec3_t cuda_get_barycentric(polygon_t *poly, int x, int y) {
vec3_t coords;
coords.x = x * poly->loc2.y;
coords.x -= x * poly->loc3.y;
coords.x -= poly->loc2.x * y;
coords.x += poly->loc2.x * poly->loc3.y;
coords.x += poly->loc3.x * y;
coords.x -= poly->loc3.x * poly->loc2.y;
coords.x /= poly->det;
coords.y = poly->loc1.x * y;
coords.y -= poly->loc1.x * poly->loc3.y;
coords.y -= x * poly->loc1.y;
coords.y += x * poly->loc3.y;
coords.y += poly->loc3.x * poly->loc1.y;
coords.y -= poly->loc3.x * y;
coords.y /= poly->det;
coords.z = poly->loc1.x * poly->loc2.y;
coords.z -= poly->loc1.x * y;
coords.z -= poly->loc2.x * poly->loc1.y;
coords.z += poly->loc2.x * y;
coords.z += x * poly->loc1.y;
coords.z -= x * poly->loc2.y;
coords.z /= poly->det;
return coords;
}
__device__ void cuda_attomic_set_color(color_t *pixel, color_t *newColor, float *zBuffVal, float new_zVal, int *lock) {
// acquire the per-pixel spin lock: swap in 1 until the previous value is 0 (free);
// after the loop lockVal holds 0, which the closing atomicExch writes back to release
int lockVal = 1;
while (lockVal = atomicExch(lock, lockVal));
if (*zBuffVal < new_zVal) {
*zBuffVal = new_zVal;
pixel->red = newColor->red;
pixel->green = newColor->green;
pixel->blue = newColor->blue;
}
atomicExch(lock, lockVal);
}
__device__ void cuda_draw_polygon_pixel(color_t *pixel, float *zBuffVal, int *lock, polygon_t *polygon, int x, int y) {
vec3_t bary;
float zVal;
color_t newColor;
bary = cuda_get_barycentric(polygon, x, y);
if (bary.x >= 0 && bary.y >= 0 && bary.z >= 0) {
zVal = bary.x * polygon->zValues.x + bary.y * polygon->zValues.y + bary.z * polygon->zValues.z;
newColor.red = (bary.x * polygon->color1.x + bary.y * polygon->color2.x + bary.z * polygon->color3.x) * 255;
newColor.green = (bary.x * polygon->color1.y + bary.y * polygon->color2.y + bary.z * polygon->color3.y) * 255;
newColor.blue = (bary.x * polygon->color1.z + bary.y * polygon->color2.z + bary.z * polygon->color3.z) * 255;
cuda_attomic_set_color(pixel, &newColor, zBuffVal, zVal, lock);
}
}
__device__ color_t* cuda_get_color_at(color_t *colorBuffer, int width, int x, int y) {
return colorBuffer + width*y + x;
}
__device__ float* cuda_get_zvalue_at(float *zBuffer, int width, int x, int y) {
return zBuffer + width*y + x;
}
__device__ int* cuda_get_lock_at(int *locks, int width, int x, int y) {
return locks + width*y + x;
}
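// Kernel: one thread per polygon. Each thread scans its polygon's screen-space
// bounding box and shades the covered pixels, using the per-pixel lock to
// serialize the depth test and color write.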
__global__ void cuda_draw_polygons(polygon_t *polygons, int polyCount, color_t *colorBuffer, float *zBuffer, int *locks, int width, int height) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j, k;
polygon_t *polygon;
color_t *pixel;
float *zVal;
int *lock;
if (i < polyCount) {
polygon = polygons + i;
for (j = polygon->low.x; j < polygon->high.x; ++j) {
for (k = polygon->low.y; k < polygon->high.y; ++k) {
pixel = cuda_get_color_at(colorBuffer, width, j, k);
zVal = cuda_get_zvalue_at(zBuffer, width, j, k);
lock = cuda_get_lock_at(locks, width, j, k);
cuda_draw_polygon_pixel(pixel, zVal, lock, polygon, j, k);
}
}
}
}
void create_polygons_cuda(mesh_t *mesh, int width, int height, mat4_t *mtx) {
int block_size = 16;
int num_blocks = mesh->triangleCount / block_size + (mesh->triangleCount % block_size == 0 ? 0 : 1);
mat4_t *d_mtx;
cudaMalloc((void **) &d_mtx, sizeof(mat4_t));
cudaMemcpy(d_mtx, mtx, sizeof(mat4_t), cudaMemcpyHostToDevice);
cuda_create_polygons <<< num_blocks, block_size >>> (mesh->d_polygons, mesh->d_vertices, mesh->d_triangles, mesh->polygonCount, width, height, d_mtx);
cudaFree(d_mtx);
}
void clear_buffers_cuda(drawbuffer_t *buffers) {
int block_size = 64;
int num_blocks = (buffers->width * buffers->height) / block_size + ((buffers->width * buffers->height) % block_size == 0 ? 0 : 1);
cuda_clear_buffers <<< num_blocks, block_size >>> (buffers->d_colorBuffer, buffers->d_zBuffer, buffers->d_locks, buffers->width * buffers->height);
}
void rasterize_mesh_cuda(drawbuffer_t *buffers, mesh_t *mesh, int duplicates) {
mat4_t mtxScale, mtx;
float scale;
int subDivs = ceil(sqrtf(duplicates));
int block_size = 16;
int num_blocks = mesh->polygonCount / block_size + (mesh->polygonCount % block_size == 0 ? 0 : 1);
scale = buffers->width < buffers->height ? buffers->width : buffers->height;
scale /= subDivs;
mtxScale = mat4_scaled(scale, scale, 1.0f);
for (int i = 0; i < duplicates; ++i) {
mtx = mtxScale;
mat4_translate3f(&mtx, scale * (i/subDivs), scale * (i%subDivs), 0.0f);
create_polygons_cuda(mesh, buffers->width, buffers->height, &mtx);
cuda_draw_polygons <<< num_blocks, block_size >>> (mesh->d_polygons, mesh->polygonCount, buffers->d_colorBuffer,
buffers->d_zBuffer, buffers->d_locks, buffers->width, buffers->height);
}
}
|
6da2f85dc82ad9283676d1cc0b083de2f932d8ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "rocblas.h"
#define CudaAssert( expression ) \
if ( !(expression)) { \
printf( "Assert failed %d:%d at %s:%d\n", blockIdx.x, threadIdx.x, __FILE__, __LINE__ ); \
}
#include "utils.c"
#include "SoftMaxTree.cu"
#include "BlockSparse.cu"
#include "WindowSparse.cu"
#include "WindowGate.cu"
#include "WindowGate2.cu"
#include "LazyKBest.cu"
#include "CudaQuickProduct.cu"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunnx(lua_State *L);
int luaopen_libcunnx(lua_State *L)
{
lua_newtable(L);
cunnx_SoftMaxTree_init(L);
cunnx_BlockSparse_init(L);
cunnx_WindowSparse_init(L);
cunnx_WindowGate_init(L);
cunnx_WindowGate2_init(L);
cunnx_LazyKBest_init(L);
cunnx_CudaQuickProduct_init(L);
return 1;
}
| 6da2f85dc82ad9283676d1cc0b083de2f932d8ba.cu | #include "luaT.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "cublas_v2.h"
#define CudaAssert( expression ) \
if ( !(expression)) { \
printf( "Assert failed %d:%d at %s:%d\n", blockIdx.x, threadIdx.x, __FILE__, __LINE__ ); \
}
#include "utils.c"
#include "SoftMaxTree.cu"
#include "BlockSparse.cu"
#include "WindowSparse.cu"
#include "WindowGate.cu"
#include "WindowGate2.cu"
#include "LazyKBest.cu"
#include "CudaQuickProduct.cu"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunnx(lua_State *L);
int luaopen_libcunnx(lua_State *L)
{
lua_newtable(L);
cunnx_SoftMaxTree_init(L);
cunnx_BlockSparse_init(L);
cunnx_WindowSparse_init(L);
cunnx_WindowGate_init(L);
cunnx_WindowGate2_init(L);
cunnx_LazyKBest_init(L);
cunnx_CudaQuickProduct_init(L);
return 1;
}
|
453aafe5d978554dd1670ed2823c5b94f0014e11.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020-2023, XGBoost contributors
*/
#include <algorithm>
#include <memory>
#include <type_traits>
#include "../common/hist_util.cuh"
#include "batch_utils.h" // for RegenGHist
#include "device_adapter_hip.cuh"
#include "ellpack_page.cuh"
#include "gradient_index.h"
#include "iterative_dmatrix.h"
#include "proxy_dmatrix.cuh"
#include "proxy_dmatrix.h"
#include "simple_batch_iterator.h"
#include "sparse_page_source.h"
namespace xgboost::data {
void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p,
DataIterHandle iter_handle, float missing,
std::shared_ptr<DMatrix> ref) {
// A handle passed to external iterator.
DMatrixProxy* proxy = MakeProxy(proxy_);
CHECK(proxy);
// The external iterator
auto iter =
DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_};
dh::XGBCachingDeviceAllocator<char> alloc;
auto num_rows = [&]() {
return Dispatch(proxy, [](auto const& value) { return value.NumRows(); });
};
auto num_cols = [&]() {
return Dispatch(proxy, [](auto const& value) { return value.NumCols(); });
};
size_t row_stride = 0;
size_t nnz = 0;
// Sketch for all batches.
std::vector<common::SketchContainer> sketch_containers;
size_t batches = 0;
size_t accumulated_rows = 0;
bst_feature_t cols = 0;
int32_t current_device;
dh::safe_cuda(hipGetDevice(&current_device));
auto get_device = [&]() -> int32_t {
std::int32_t d = (ctx->gpu_id == Context::kCpuId) ? current_device : ctx->gpu_id;
CHECK_NE(d, Context::kCpuId);
return d;
};
/**
* Generate quantiles
*/
common::HistogramCuts cuts;
do {
// We use do while here as the first batch is fetched in ctor
// ctx_.gpu_id = proxy->DeviceIdx();
CHECK_LT(ctx->gpu_id, common::AllVisibleGPUs());
dh::safe_cuda(hipSetDevice(get_device()));
if (cols == 0) {
cols = num_cols();
collective::Allreduce<collective::Operation::kMax>(&cols, 1);
this->info_.num_col_ = cols;
} else {
CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns.";
}
if (!ref) {
sketch_containers.emplace_back(proxy->Info().feature_types, p.max_bin, cols, num_rows(),
get_device());
auto* p_sketch = &sketch_containers.back();
proxy->Info().weights_.SetDevice(get_device());
Dispatch(proxy, [&](auto const& value) {
common::AdapterDeviceSketch(value, p.max_bin, proxy->Info(), missing, p_sketch);
});
}
auto batch_rows = num_rows();
accumulated_rows += batch_rows;
dh::device_vector<size_t> row_counts(batch_rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size());
row_stride = ::max(row_stride, Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, get_device(), missing);
}));
nnz += thrust::reduce(thrust::hip::par(alloc), row_counts.begin(), row_counts.end());
batches++;
} while (iter.Next());
iter.Reset();
auto n_features = cols;
CHECK_GE(n_features, 1) << "Data must has at least 1 column.";
dh::safe_cuda(hipSetDevice(get_device()));
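// without a reference DMatrix, merge the per-batch sketches and turn them into
// cut points; otherwise reuse the cuts of the reference DMatrix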
if (!ref) {
HostDeviceVector<FeatureType> ft;
common::SketchContainer final_sketch(
sketch_containers.empty() ? ft : sketch_containers.front().FeatureTypes(), p.max_bin, cols,
accumulated_rows, get_device());
for (auto const& sketch : sketch_containers) {
final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data());
final_sketch.FixError();
}
sketch_containers.clear();
sketch_containers.shrink_to_fit();
final_sketch.MakeCuts(&cuts);
} else {
GetCutsFromRef(ctx, ref, Info().num_col_, p, &cuts);
}
this->info_.num_row_ = accumulated_rows;
this->info_.num_nonzero_ = nnz;
auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() {
if (!ellpack_) {
// Should be put inside the while loop to protect against empty batch. In
// that case device id is invalid.
ellpack_.reset(new EllpackPage);
*(ellpack_->Impl()) =
EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows);
}
};
/**
* Generate gradient index.
*/
size_t offset = 0;
iter.Reset();
size_t n_batches_for_verification = 0;
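// second pass over the external iterator: convert each batch to an ELLPACK
// page and copy it into the final page at the running offset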
while (iter.Next()) {
init_page();
dh::safe_cuda(hipSetDevice(get_device()));
auto rows = num_rows();
dh::device_vector<size_t> row_counts(rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size());
Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, get_device(), missing);
});
auto is_dense = this->IsDense();
proxy->Info().feature_types.SetDevice(get_device());
auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan();
auto new_impl = Dispatch(proxy, [&](auto const& value) {
return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span,
d_feature_types, row_stride, rows, cuts);
});
size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset);
offset += num_elements;
proxy->Info().num_row_ = num_rows();
proxy->Info().num_col_ = cols;
if (batches != 1) {
this->info_.Extend(std::move(proxy->Info()), false, true);
}
n_batches_for_verification++;
}
CHECK_EQ(batches, n_batches_for_verification)
<< "Different number of batches returned between 2 iterations";
if (batches == 1) {
this->info_ = std::move(proxy->Info());
this->info_.num_nonzero_ = nnz;
CHECK_EQ(proxy->Info().labels.Size(), 0);
}
iter.Reset();
// Synchronise worker columns
info_.SynchronizeNumberOfColumns();
}
BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(Context const* ctx,
BatchParam const& param) {
if (param.Initialized()) {
CheckParam(param);
CHECK(!detail::RegenGHist(param, batch_)) << error::InconsistentMaxBin();
}
if (!ellpack_ && !ghist_) {
LOG(FATAL) << "`QuantileDMatrix` not initialized.";
}
if (!ellpack_) {
ellpack_.reset(new EllpackPage());
if (ctx->IsCUDA()) {
this->Info().feature_types.SetDevice(ctx->gpu_id);
*ellpack_->Impl() =
EllpackPageImpl(ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan());
} else if (fmat_ctx_.IsCUDA()) {
this->Info().feature_types.SetDevice(fmat_ctx_.gpu_id);
*ellpack_->Impl() =
EllpackPageImpl(&fmat_ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan());
} else {
// Can happen when QDM is initialized on CPU, but a GPU version is queried by a different QDM
// for cut reference.
auto cuda_ctx = ctx->MakeCUDA();
this->Info().feature_types.SetDevice(cuda_ctx.gpu_id);
*ellpack_->Impl() =
EllpackPageImpl(&cuda_ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan());
}
}
CHECK(ellpack_);
auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_));
return BatchSet<EllpackPage>(begin_iter);
}
void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) {
*cuts = page.Impl()->Cuts();
}
} // namespace xgboost::data
| 453aafe5d978554dd1670ed2823c5b94f0014e11.cu | /**
* Copyright 2020-2023, XGBoost contributors
*/
#include <algorithm>
#include <memory>
#include <type_traits>
#include "../common/hist_util.cuh"
#include "batch_utils.h" // for RegenGHist
#include "device_adapter.cuh"
#include "ellpack_page.cuh"
#include "gradient_index.h"
#include "iterative_dmatrix.h"
#include "proxy_dmatrix.cuh"
#include "proxy_dmatrix.h"
#include "simple_batch_iterator.h"
#include "sparse_page_source.h"
namespace xgboost::data {
void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p,
DataIterHandle iter_handle, float missing,
std::shared_ptr<DMatrix> ref) {
// A handle passed to external iterator.
DMatrixProxy* proxy = MakeProxy(proxy_);
CHECK(proxy);
// The external iterator
auto iter =
DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_};
dh::XGBCachingDeviceAllocator<char> alloc;
auto num_rows = [&]() {
return Dispatch(proxy, [](auto const& value) { return value.NumRows(); });
};
auto num_cols = [&]() {
return Dispatch(proxy, [](auto const& value) { return value.NumCols(); });
};
size_t row_stride = 0;
size_t nnz = 0;
// Sketch for all batches.
std::vector<common::SketchContainer> sketch_containers;
size_t batches = 0;
size_t accumulated_rows = 0;
bst_feature_t cols = 0;
int32_t current_device;
dh::safe_cuda(cudaGetDevice(&current_device));
auto get_device = [&]() -> int32_t {
std::int32_t d = (ctx->gpu_id == Context::kCpuId) ? current_device : ctx->gpu_id;
CHECK_NE(d, Context::kCpuId);
return d;
};
/**
* Generate quantiles
*/
common::HistogramCuts cuts;
do {
// We use do while here as the first batch is fetched in ctor
// ctx_.gpu_id = proxy->DeviceIdx();
CHECK_LT(ctx->gpu_id, common::AllVisibleGPUs());
dh::safe_cuda(cudaSetDevice(get_device()));
if (cols == 0) {
cols = num_cols();
collective::Allreduce<collective::Operation::kMax>(&cols, 1);
this->info_.num_col_ = cols;
} else {
CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns.";
}
if (!ref) {
sketch_containers.emplace_back(proxy->Info().feature_types, p.max_bin, cols, num_rows(),
get_device());
auto* p_sketch = &sketch_containers.back();
proxy->Info().weights_.SetDevice(get_device());
Dispatch(proxy, [&](auto const& value) {
common::AdapterDeviceSketch(value, p.max_bin, proxy->Info(), missing, p_sketch);
});
}
auto batch_rows = num_rows();
accumulated_rows += batch_rows;
dh::device_vector<size_t> row_counts(batch_rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size());
row_stride = std::max(row_stride, Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, get_device(), missing);
}));
nnz += thrust::reduce(thrust::cuda::par(alloc), row_counts.begin(), row_counts.end());
batches++;
} while (iter.Next());
iter.Reset();
auto n_features = cols;
CHECK_GE(n_features, 1) << "Data must has at least 1 column.";
dh::safe_cuda(cudaSetDevice(get_device()));
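// without a reference DMatrix, merge the per-batch sketches and turn them into
// cut points; otherwise reuse the cuts of the reference DMatrix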
if (!ref) {
HostDeviceVector<FeatureType> ft;
common::SketchContainer final_sketch(
sketch_containers.empty() ? ft : sketch_containers.front().FeatureTypes(), p.max_bin, cols,
accumulated_rows, get_device());
for (auto const& sketch : sketch_containers) {
final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data());
final_sketch.FixError();
}
sketch_containers.clear();
sketch_containers.shrink_to_fit();
final_sketch.MakeCuts(&cuts);
} else {
GetCutsFromRef(ctx, ref, Info().num_col_, p, &cuts);
}
this->info_.num_row_ = accumulated_rows;
this->info_.num_nonzero_ = nnz;
auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() {
if (!ellpack_) {
// Should be put inside the while loop to protect against empty batch. In
// that case device id is invalid.
ellpack_.reset(new EllpackPage);
*(ellpack_->Impl()) =
EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows);
}
};
/**
* Generate gradient index.
*/
size_t offset = 0;
iter.Reset();
size_t n_batches_for_verification = 0;
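// second pass over the external iterator: convert each batch to an ELLPACK
// page and copy it into the final page at the running offset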
while (iter.Next()) {
init_page();
dh::safe_cuda(cudaSetDevice(get_device()));
auto rows = num_rows();
dh::device_vector<size_t> row_counts(rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size());
Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, get_device(), missing);
});
auto is_dense = this->IsDense();
proxy->Info().feature_types.SetDevice(get_device());
auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan();
auto new_impl = Dispatch(proxy, [&](auto const& value) {
return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span,
d_feature_types, row_stride, rows, cuts);
});
size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset);
offset += num_elements;
proxy->Info().num_row_ = num_rows();
proxy->Info().num_col_ = cols;
if (batches != 1) {
this->info_.Extend(std::move(proxy->Info()), false, true);
}
n_batches_for_verification++;
}
CHECK_EQ(batches, n_batches_for_verification)
<< "Different number of batches returned between 2 iterations";
if (batches == 1) {
this->info_ = std::move(proxy->Info());
this->info_.num_nonzero_ = nnz;
CHECK_EQ(proxy->Info().labels.Size(), 0);
}
iter.Reset();
// Synchronise worker columns
info_.SynchronizeNumberOfColumns();
}
BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(Context const* ctx,
BatchParam const& param) {
if (param.Initialized()) {
CheckParam(param);
CHECK(!detail::RegenGHist(param, batch_)) << error::InconsistentMaxBin();
}
if (!ellpack_ && !ghist_) {
LOG(FATAL) << "`QuantileDMatrix` not initialized.";
}
if (!ellpack_) {
ellpack_.reset(new EllpackPage());
if (ctx->IsCUDA()) {
this->Info().feature_types.SetDevice(ctx->gpu_id);
*ellpack_->Impl() =
EllpackPageImpl(ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan());
} else if (fmat_ctx_.IsCUDA()) {
this->Info().feature_types.SetDevice(fmat_ctx_.gpu_id);
*ellpack_->Impl() =
EllpackPageImpl(&fmat_ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan());
} else {
// Can happen when QDM is initialized on CPU, but a GPU version is queried by a different QDM
// for cut reference.
auto cuda_ctx = ctx->MakeCUDA();
this->Info().feature_types.SetDevice(cuda_ctx.gpu_id);
*ellpack_->Impl() =
EllpackPageImpl(&cuda_ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan());
}
}
CHECK(ellpack_);
auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_));
return BatchSet<EllpackPage>(begin_iter);
}
void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) {
*cuts = page.Impl()->Cuts();
}
} // namespace xgboost::data
|
013d31f935a1390e4fe123b2ed174da853d5dd1b.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1
//null pointer access
// ALTHOUGH, IT WORKS
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 2//4//8
__global__ void foo(int *H) {
size_t tmp = (size_t)H; //type cast
tmp += sizeof(int);
int *G = (int *)tmp;
G -= 1; //POSSIBLE NULL POINTER ACCESS
G[threadIdx.x] = threadIdx.x;
__syncthreads();
H[threadIdx.x] = G[threadIdx.x];
}
| 013d31f935a1390e4fe123b2ed174da853d5dd1b.cu | //xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1
//null pointer access
// ALTHOUGH, IT WORKS
#include <stdio.h>
#include <cuda.h>
#define N 2//4//8
__global__ void foo(int *H) {
size_t tmp = (size_t)H; //type cast
tmp += sizeof(int);
int *G = (int *)tmp;
G -= 1; //POSSIBLE NULL POINTER ACCESS
G[threadIdx.x] = threadIdx.x;
__syncthreads();
H[threadIdx.x] = G[threadIdx.x];
}
|
80784c1445ad1e848709b8476eafbd6ce06d3a76.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "test_shfl_up.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
test_shfl_up), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out);
hipDeviceSynchronize();
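// 10 untimed warm-up launches before the timed measurement loop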
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
test_shfl_up), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
test_shfl_up), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 80784c1445ad1e848709b8476eafbd6ce06d3a76.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "test_shfl_up.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
test_shfl_up<<<gridBlock,threadBlock>>>(in,out);
cudaDeviceSynchronize();
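// 10 untimed warm-up launches before the timed measurement loop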
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
test_shfl_up<<<gridBlock,threadBlock>>>(in,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
test_shfl_up<<<gridBlock,threadBlock>>>(in,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bfae8fe0b21c3692aeb8cc14094dfcc6981d3f1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include <stdio.h>
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
/* define blocksize X and blocksize Y and blocksize K */
#define THREADS_PER_BLOCK_X 16 // Thread block size, x dimension
#define THREADS_PER_BLOCK_Y 16 // Thread block size, y dimension
#define BK 16 // square block of K size
__global__ void GPU_shmem(const int m, double const * const a, double const * const b, double *c )
{
/* setup some constanst for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * THREADS_PER_BLOCK_Y;
const int ibx = blockIdx.x * THREADS_PER_BLOCK_X;
/* shared memory arrays for A and B */
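/* the extra +1 column pads each row, which helps avoid shared-memory bank conflicts */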
__shared__ double as[ THREADS_PER_BLOCK_X ][ BK + 1 ];
__shared__ double bs[ BK ][ THREADS_PER_BLOCK_Y + 1 ];
/* space for C to be held in registers */
double c_tmp = 0.0 ;
/* calculate my initial offset into A and B */
int aoff = INDX( ibx + tx, ty, m );
int boff = INDX( tx, iby + ty, m );
/* main loop over blocks of K */
for( int Kblock = 0; Kblock < m; Kblock+=BK )
{
/* read block of A into shared memory */
as[ tx ][ ty ] = a[ aoff ];
/* read block of B into shared memory */
bs[ tx ][ ty ] = b[ boff ];
__syncthreads();
/* increment A and B offsets for next round of data reads */
boff += BK;
aoff += m * BK;
/* triply nested loop to perform the matmult on the blocks */
#pragma unroll
for( int k = 0 ; k < BK ; k++ )
{
c_tmp += as[ tx ][ k ] * bs[ k ][ ty ];
}
__syncthreads();
} /* end for Kblock */
/* set C to its proper index int the C matrix */
int coff = INDX( ibx + tx, iby + ty, m );
/* write results to the C matrix */
c[ coff ] = c_tmp;
} /* end GPU_shmem */
int main( int argc, char *argv[] )
{
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
hipMalloc( (void **)&d_a, numbytes );
hipMalloc( (void **)&d_b, numbytes );
hipMalloc( (void **)&d_c, numbytes );
/* copy a and b to device */
hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice );
hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice );
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate( &handle );
double alpha = 1.0;
double beta = 0.0;
/* start timers */
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
/* call CUBLAS dgemm */
hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size );
/* stop timers */
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost );
/* reset C on device to zero */
hipMemset( d_c, 0, numbytes );
/* setup grid and block sizes */
dim3 blocksize( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 gridsize( size / blocksize.x, size / blocksize.y, 1 );
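/* each 16x16 thread block computes one 16x16 tile of C; SIZE is a multiple of the block size, so the grid covers the matrix exactly */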
/* start timers */
hipEventRecord( start, 0 );
/* call the GPU_shmem kernel */
hipLaunchKernelGGL(( GPU_shmem), dim3(gridsize), dim3(blocksize) , 0, 0, size, d_a, d_b, d_c );
/* stop timers */
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
/* print timing data for the GPU shared-memory kernel */
fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost );
hipblasDestroy( handle );
hipEventDestroy( start );
hipEventDestroy( stop );
/* check CUBLAS versus GPU SHMEM numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("Error value is suspiciously high!\n");
/* cleanup */
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| bfae8fe0b21c3692aeb8cc14094dfcc6981d3f1a.cu | #include "cuda_runtime.h"
#include "cublas_v2.h"
#include <stdio.h>
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
/* define blocksize X and blocksize Y and blocksize K */
#define THREADS_PER_BLOCK_X 16 // Thread block size, x dimension
#define THREADS_PER_BLOCK_Y 16 // Thread block size, y dimension
#define BK 16 // square block of K size
__global__ void GPU_shmem(const int m, double const * const a, double const * const b, double *c )
{
/* setup some constanst for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * THREADS_PER_BLOCK_Y;
const int ibx = blockIdx.x * THREADS_PER_BLOCK_X;
/* shared memory arrays for A and B */
__shared__ double as[ THREADS_PER_BLOCK_X ][ BK + 1 ];
__shared__ double bs[ BK ][ THREADS_PER_BLOCK_Y + 1 ];
/* space for C to be held in registers */
double c_tmp = 0.0 ;
/* calculate my initial offset into A and B */
int aoff = INDX( ibx + tx, ty, m );
int boff = INDX( tx, iby + ty, m );
/* main loop over blocks of K */
for( int Kblock = 0; Kblock < m; Kblock+=BK )
{
/* read block of A into shared memory */
as[ tx ][ ty ] = a[ aoff ];
/* read block of B into shared memory */
bs[ tx ][ ty ] = b[ boff ];
__syncthreads();
/* increment A and B offsets for next round of data reads */
boff += BK;
aoff += m * BK;
/* triply nested loop to perform the matmult on the blocks */
#pragma unroll
for( int k = 0 ; k < BK ; k++ )
{
c_tmp += as[ tx ][ k ] * bs[ k ][ ty ];
}
__syncthreads();
} /* end for Kblock */
/* set C to its proper index in the C matrix */
int coff = INDX( ibx + tx, iby + ty, m );
/* write results to the C matrix */
c[ coff ] = c_tmp;
} /* end GPU_shmem */
int main( int argc, char *argv[] )
{
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
cudaMalloc( (void **)&d_a, numbytes );
cudaMalloc( (void **)&d_b, numbytes );
cudaMalloc( (void **)&d_c, numbytes );
/* copy a and b to device */
cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice );
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate( &handle );
double alpha = 1.0;
double beta = 0.0;
/* start timers */
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
/* call CUBLAS dgemm */
cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size );
/* stop timers */
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost );
/* reset C on device to zero */
cudaMemset( d_c, 0, numbytes );
/* setup grid and block sizes */
dim3 blocksize( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 gridsize( size / blocksize.x, size / blocksize.y, 1 );
/* start timers */
cudaEventRecord( start, 0 );
/* call GPU_shmem */
GPU_shmem<<< gridsize, blocksize >>> ( size, d_a, d_b, d_c );
/* stop timers */
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
/* print data for GPU shmem */
fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost );
cublasDestroy( handle );
cudaEventDestroy( start );
cudaEventDestroy( stop );
/* check CUBLAS versus GPU SHMEM numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("Error value is suspiciously high!\n");
/* cleanup */
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
6731069ab354c228e6652fb51ddf7176096be977.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlange.cu normal z -> d, Fri Jul 18 17:34:12 2014
@author Mark Gates
*/
#include "common_magma.h"
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf,
* where m and n are any size.
* Has ceil( m/64 ) blocks of 64 threads. Each thread does one row. */
extern "C" __global__ void
dlange_inf_kernel(
int m, int n, const double *A, int lda, double *dwork )
{
int i = blockIdx.x*64 + threadIdx.x;
double Cb[4] = {0, 0, 0, 0};
int n_mod_4 = n % 4;
n -= n_mod_4;
// if beyond last row, skip row
if ( i < m ) {
A += i;
if ( n >= 4 ) {
const double *Aend = A + lda*n;
double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
A += 4*lda;
while( A < Aend ) {
Cb[0] += fabs( rA[0] ); rA[0] = A[0];
Cb[1] += fabs( rA[1] ); rA[1] = A[lda];
Cb[2] += fabs( rA[2] ); rA[2] = A[2*lda];
Cb[3] += fabs( rA[3] ); rA[3] = A[3*lda];
A += 4*lda;
}
Cb[0] += fabs( rA[0] );
Cb[1] += fabs( rA[1] );
Cb[2] += fabs( rA[2] );
Cb[3] += fabs( rA[3] );
}
/* clean up code */
switch( n_mod_4 ) {
case 0:
break;
case 1:
Cb[0] += fabs( A[0] );
break;
case 2:
Cb[0] += fabs( A[0] );
Cb[1] += fabs( A[lda] );
break;
case 3:
Cb[0] += fabs( A[0] );
Cb[1] += fabs( A[lda] );
Cb[2] += fabs( A[2*lda] );
break;
}
/* compute final result */
dwork[i] = Cb[0] + Cb[1] + Cb[2] + Cb[3];
}
}
/**
Purpose
-------
DLANGE returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real matrix A.
Description
-----------
DLANGE returns the value
DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ** not yet supported
(
( norm1(A), NORM = '1', 'O' or 'o' ** not yet supported
(
( normI(A), NORM = 'I' or 'i'
(
( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of
squares). Note that max(abs(A(i,j))) is not a consistent matrix norm.
Arguments
---------
@param[in]
norm CHARACTER*1
Specifies the value to be returned in DLANGE as described
above.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0. When M = 0,
DLANGE is set to zero.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0. When N = 0,
DLANGE is set to zero.
@param[in]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
The m by n matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(M,1).
@param
dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= M when NORM = 'I'; otherwise, WORK is not
referenced.
@ingroup magma_daux2
********************************************************************/
extern "C" double
magmablas_dlange(
magma_norm_t norm, magma_int_t m, magma_int_t n,
const double *A, magma_int_t lda, double *dwork )
{
magma_int_t info = 0;
if ( norm != MagmaInfNorm )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( lda < m )
info = -5;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( m == 0 || n == 0 )
return 0;
dim3 threads( 64 );
dim3 grid( (m-1)/64 + 1 );
hipLaunchKernelGGL(( dlange_inf_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda, dwork );
int i = magma_idamax( m, dwork, 1 ) - 1;
double res;
hipMemcpy( &res, &dwork[i], sizeof(double), hipMemcpyDeviceToHost );
return res;
}
| 6731069ab354c228e6652fb51ddf7176096be977.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlange.cu normal z -> d, Fri Jul 18 17:34:12 2014
@author Mark Gates
*/
#include "common_magma.h"
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf,
* where m and n are any size.
* Has ceil( m/64 ) blocks of 64 threads. Each thread does one row. */
extern "C" __global__ void
dlange_inf_kernel(
int m, int n, const double *A, int lda, double *dwork )
{
int i = blockIdx.x*64 + threadIdx.x;
double Cb[4] = {0, 0, 0, 0};
int n_mod_4 = n % 4;
n -= n_mod_4;
// if beyond last row, skip row
if ( i < m ) {
A += i;
if ( n >= 4 ) {
const double *Aend = A + lda*n;
double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
A += 4*lda;
while( A < Aend ) {
Cb[0] += fabs( rA[0] ); rA[0] = A[0];
Cb[1] += fabs( rA[1] ); rA[1] = A[lda];
Cb[2] += fabs( rA[2] ); rA[2] = A[2*lda];
Cb[3] += fabs( rA[3] ); rA[3] = A[3*lda];
A += 4*lda;
}
Cb[0] += fabs( rA[0] );
Cb[1] += fabs( rA[1] );
Cb[2] += fabs( rA[2] );
Cb[3] += fabs( rA[3] );
}
/* clean up code */
switch( n_mod_4 ) {
case 0:
break;
case 1:
Cb[0] += fabs( A[0] );
break;
case 2:
Cb[0] += fabs( A[0] );
Cb[1] += fabs( A[lda] );
break;
case 3:
Cb[0] += fabs( A[0] );
Cb[1] += fabs( A[lda] );
Cb[2] += fabs( A[2*lda] );
break;
}
/* compute final result */
dwork[i] = Cb[0] + Cb[1] + Cb[2] + Cb[3];
}
}
/**
Purpose
-------
DLANGE returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real matrix A.
Description
-----------
DLANGE returns the value
DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ** not yet supported
(
( norm1(A), NORM = '1', 'O' or 'o' ** not yet supported
(
( normI(A), NORM = 'I' or 'i'
(
( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of
squares). Note that max(abs(A(i,j))) is not a consistent matrix norm.
Arguments
---------
@param[in]
norm CHARACTER*1
Specifies the value to be returned in DLANGE as described
above.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0. When M = 0,
DLANGE is set to zero.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0. When N = 0,
DLANGE is set to zero.
@param[in]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
The m by n matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(M,1).
@param
dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= M when NORM = 'I'; otherwise, WORK is not
referenced.
@ingroup magma_daux2
********************************************************************/
extern "C" double
magmablas_dlange(
magma_norm_t norm, magma_int_t m, magma_int_t n,
const double *A, magma_int_t lda, double *dwork )
{
magma_int_t info = 0;
if ( norm != MagmaInfNorm )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( lda < m )
info = -5;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( m == 0 || n == 0 )
return 0;
dim3 threads( 64 );
dim3 grid( (m-1)/64 + 1 );
dlange_inf_kernel<<< grid, threads, 0, magma_stream >>>( m, n, A, lda, dwork );
int i = magma_idamax( m, dwork, 1 ) - 1;
double res;
cudaMemcpy( &res, &dwork[i], sizeof(double), cudaMemcpyDeviceToHost );
return res;
}
|
97a7ec27c97a8e54d58a5321c9839caa6f63624b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <helper_functions.h> // for benchmark timing
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
////////////////////////////////////////////////////////////////////////////////
//! Compute the following on the GPU:
//! C = alpha * A * B + beta * C
//! @param A matrix A used in the device computation
//! @param B matrix B used in the device computation
//! @param C matrix C used in the device computation
//! @param N height of matrices A and C
//! @param M width of matrices B and C
//! @param K width of matrix A / height of matrix B
//! @param alpha scalar applied to the product A * B
//! @param beta scalar applied to C in the accumulation
////////////////////////////////////////////////////////////////////////////////
__global__ void sgemm_gpu_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.f;
for (int i = 0; i < K; ++i) {
sum += A[row * K + i] * B[i * K + col];
}
C[row * M + col] = alpha * sum + beta * C[row * M + col];
}
void sgemm_gpu(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
dim3 dimBlock(BLOCK_DIM_X, BLOCK_DIM_Y);
dim3 dimGrid(M / dimBlock.x, N / dimBlock.y);
hipLaunchKernelGGL(( sgemm_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C, N, M, K, alpha, beta);
}
void random_init(float *data, int size)
{
for (int i = 0; i < size; ++i) {
data[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
}
void performance_estimation(void(*sgemm)(const float *, const float *, float *, int, int, int, float, float),
const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
int test_iterations = 100;
// create the timer
StopWatchInterface *timer = 0;
// warm-up run before timing starts
sgemm(A, B, C, N, M, K, alpha, beta);
// record the start event
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// timed loop (the operation itself)
for (int i = 0; i < test_iterations; i++) {
sgemm(A, B, C, N, M, K, alpha, beta);
}
// record the stop time once the GPU work has finished
sdkStopTimer(&timer);
// compute and print the performance figures
float operation_time = sdkGetAverageTimerValue(&timer);
float operation_time_1_epoch = operation_time / test_iterations;
printf("Operation Time= %.4f msec\n", operation_time_1_epoch);
// delete the timer
sdkDeleteTimer(&timer);
}
int main(void)
{
float *A, *B, *C;
float *d_A, *d_B, *d_C;
int N, M, K;
float alpha = 2.f;
float beta = 1.f;
N = M = K = 2048;
// allocate 1-D buffers on the CPU side
A = (float *)malloc(N * K * sizeof(float));
B = (float *)malloc(K * M * sizeof(float));
C = (float *)malloc(N * M * sizeof(float));
// allocate 1-D buffers on the GPU side
hipMalloc((void **)&d_A, N * K * sizeof(float));
hipMalloc((void **)&d_B, K * M * sizeof(float));
hipMalloc((void **)&d_C, N * M * sizeof(float));
// initialize the data
random_init(A, N * K);
random_init(B, K * M);
random_init(C, N * M);
// copy the host data to the GPU
hipMemcpy(d_A, A, N * K * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, K * M * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, N * M * sizeof(float), hipMemcpyHostToDevice);
// measure performance
//sgemm_gpu(d_A, d_B, d_C, N, M, K, alpha, beta);
performance_estimation(sgemm_gpu, d_A, d_B, d_C, N, M, K, alpha, beta);
// free the GPU-side memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free the CPU-side memory
free(A);
free(B);
free(C);
return 0;
} | 97a7ec27c97a8e54d58a5321c9839caa6f63624b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <helper_functions.h> // for benchmark timing
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
////////////////////////////////////////////////////////////////////////////////
//! Compute the following on the GPU:
//! C = alpha * A * B + beta * C
//! @param A matrix A used in the device computation
//! @param B matrix B used in the device computation
//! @param C matrix C used in the device computation
//! @param N height of matrices A and C
//! @param M width of matrices B and C
//! @param K width of matrix A / height of matrix B
//! @param alpha scalar applied to the product A * B
//! @param beta scalar applied to C in the accumulation
////////////////////////////////////////////////////////////////////////////////
__global__ void sgemm_gpu_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.f;
for (int i = 0; i < K; ++i) {
sum += A[row * K + i] * B[i * K + col];
}
C[row * M + col] = alpha * sum + beta * C[row * M + col];
}
void sgemm_gpu(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
dim3 dimBlock(BLOCK_DIM_X, BLOCK_DIM_Y);
dim3 dimGrid(M / dimBlock.x, N / dimBlock.y);
sgemm_gpu_kernel<<<dimGrid, dimBlock>>>(A, B, C, N, M, K, alpha, beta);
}
void random_init(float *data, int size)
{
for (int i = 0; i < size; ++i) {
data[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
}
void performance_estimation(void(*sgemm)(const float *, const float *, float *, int, int, int, float, float),
const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
int test_iterations = 100;
// create the timer
StopWatchInterface *timer = 0;
// warm-up run before timing starts
sgemm(A, B, C, N, M, K, alpha, beta);
// record the start event
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// timed loop (the operation itself)
for (int i = 0; i < test_iterations; i++) {
sgemm(A, B, C, N, M, K, alpha, beta);
}
// record the stop time once the GPU work has finished
sdkStopTimer(&timer);
// compute and print the performance figures
float operation_time = sdkGetAverageTimerValue(&timer);
float operation_time_1_epoch = operation_time / test_iterations;
printf("Operation Time= %.4f msec\n", operation_time_1_epoch);
// delete the timer
sdkDeleteTimer(&timer);
}
int main(void)
{
float *A, *B, *C;
float *d_A, *d_B, *d_C;
int N, M, K;
float alpha = 2.f;
float beta = 1.f;
N = M = K = 2048;
// allocate 1-D buffers on the CPU side
A = (float *)malloc(N * K * sizeof(float));
B = (float *)malloc(K * M * sizeof(float));
C = (float *)malloc(N * M * sizeof(float));
// allocate 1-D buffers on the GPU side
cudaMalloc((void **)&d_A, N * K * sizeof(float));
cudaMalloc((void **)&d_B, K * M * sizeof(float));
cudaMalloc((void **)&d_C, N * M * sizeof(float));
// initialize the data
random_init(A, N * K);
random_init(B, K * M);
random_init(C, N * M);
// copy the host data to the GPU
cudaMemcpy(d_A, A, N * K * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, K * M * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, N * M * sizeof(float), cudaMemcpyHostToDevice);
// measure performance
//sgemm_gpu(d_A, d_B, d_C, N, M, K, alpha, beta);
performance_estimation(sgemm_gpu, d_A, d_B, d_C, N, M, K, alpha, beta);
// free the GPU-side memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free the CPU-side memory
free(A);
free(B);
free(C);
return 0;
} |
8140a0da7c68b73276bd3dccca9dd003e4655c5d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <random>
#include <iostream>
#include <algorithm>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "thrust_all.cuh"
// initialize the per-thread random-number generator state
__global__ void setCurand(unsigned long long seed, hiprandState_t *state){
uint i_global = threadIdx.x + blockIdx.x*blockDim.x;
hiprand_init(seed, i_global, 0, &state[i_global]);
}
struct execSetting{
dim3 grid;
dim3 block;
execSetting(dim3 _grid,dim3 _block){
grid = _grid;
block = _block;
}
execSetting(int gridsize,int blocksize){
dim3 _grid(gridsize);
grid = _grid;
dim3 _block(blocksize);
block = _block;
}
};
void setRand(hiprandState_t *state,execSetting set){
std::random_device _rnd;
hipLaunchKernelGGL(( setCurand), dim3(set.grid),dim3(set.block), 0, 0, _rnd(), state);
}
template <typename T>
std::size_t check_size(int N){
return N * sizeof(T);
}
// fill result with one normally distributed random number per thread
__global__ void genrand_kernel(float *result, hiprandState_t *state){
uint i_global = threadIdx.x + blockIdx.x*blockDim.x;
result[i_global] = hiprand_normal(&state[i_global]);
}
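// Monte Carlo sample for pi: each thread scores 1.0 if its random (x, y) point lands inside the unit quarter circle, 0.0 otherwise.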
__global__ void calc_pi(float *result, hiprandState_t *state){
auto idx = threadIdx.x + blockIdx.x*blockDim.x;
auto rnd_x = hiprand_uniform(&state[idx]);
auto rnd_y = hiprand_uniform(&state[idx]);
result[idx] = (rnd_x * rnd_x + rnd_y * rnd_y < 1.0f) ? 1.0f : 0.0f;
}
int main(){
constexpr unsigned int N = 1<<24;
constexpr unsigned int num_Blocks = 1<<14;
unsigned int threads_per_blocks = ::min(::ceil(static_cast<double>(N)/num_Blocks),1024.0);
hipEvent_t start, stop;
float elapse;
// initialize time
hipEventCreate(&start);
hipEventCreate(&stop);
// record initial time
hipEventRecord(start);
thrust::device_vector<float> result(N);
thrust::device_vector<hiprandState_t> state(N);
thrust::host_vector<float> hresult(N);
float *result_ptr = thrust::raw_pointer_cast(&result[0]);
hiprandState_t *state_ptr = thrust::raw_pointer_cast(&state[0]);
execSetting set(num_Blocks,threads_per_blocks);
setRand(state_ptr, set);
hipLaunchKernelGGL(( calc_pi), dim3(set.grid),dim3(set.block), 0, 0, result_ptr,state_ptr);
double pi = thrust::reduce(result.begin(),result.end(),0.0f,thrust::plus<float>());
std::cout << 4.0/N*pi << '\n';
hipEventRecord(stop);
//wait until all events complete
hipEventSynchronize(stop);
// calc
hipEventElapsedTime(&elapse, start, stop);
std::cout<<"gpu :"<<elapse<<"ms"<<std::endl;
// clean up the timing events
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 8140a0da7c68b73276bd3dccca9dd003e4655c5d.cu | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <random>
#include <iostream>
#include <algorithm>
#include <curand.h>
#include <curand_kernel.h>
#include "thrust_all.cuh"
// initialize the per-thread random-number generator state
__global__ void setCurand(unsigned long long seed, curandState *state){
uint i_global = threadIdx.x + blockIdx.x*blockDim.x;
curand_init(seed, i_global, 0, &state[i_global]);
}
struct execSetting{
dim3 grid;
dim3 block;
execSetting(dim3 _grid,dim3 _block){
grid = _grid;
block = _block;
}
execSetting(int gridsize,int blocksize){
dim3 _grid(gridsize);
grid = _grid;
dim3 _block(blocksize);
block = _block;
}
};
void setRand(curandState *state,execSetting set){
std::random_device _rnd;
setCurand<<<set.grid,set.block>>>(_rnd(), state);
}
template <typename T>
std::size_t check_size(int N){
return N * sizeof(T);
}
// fill result with one normally distributed random number per thread
__global__ void genrand_kernel(float *result, curandState *state){
uint i_global = threadIdx.x + blockIdx.x*blockDim.x;
result[i_global] = curand_normal(&state[i_global]);
}
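// Monte Carlo sample for pi: each thread scores 1.0 if its random (x, y) point lands inside the unit quarter circle, 0.0 otherwise.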
__global__ void calc_pi(float *result, curandState *state){
auto idx = threadIdx.x + blockIdx.x*blockDim.x;
auto rnd_x = curand_uniform(&state[idx]);
auto rnd_y = curand_uniform(&state[idx]);
result[idx] = (rnd_x * rnd_x + rnd_y * rnd_y < 1.0f) ? 1.0f : 0.0f;
}
int main(){
constexpr unsigned int N = 1<<24;
constexpr unsigned int num_Blocks = 1<<14;
unsigned int threads_per_blocks = std::min(std::ceil(static_cast<double>(N)/num_Blocks),1024.0);
cudaEvent_t start, stop;
float elapse;
// initialize time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// record initial time
cudaEventRecord(start);
thrust::device_vector<float> result(N);
thrust::device_vector<curandState> state(N);
thrust::host_vector<float> hresult(N);
float *result_ptr = thrust::raw_pointer_cast(&result[0]);
curandState *state_ptr = thrust::raw_pointer_cast(&state[0]);
execSetting set(num_Blocks,threads_per_blocks);
setRand(state_ptr, set);
calc_pi<<<set.grid,set.block>>>(result_ptr,state_ptr);
double pi = thrust::reduce(result.begin(),result.end(),0.0f,thrust::plus<float>());
std::cout << 4.0/N*pi << '\n';
cudaEventRecord(stop);
//wait until all events complete
cudaEventSynchronize(stop);
// calc
cudaEventElapsedTime(&elapse, start, stop);
std::cout<<"gpu :"<<elapse<<"ms"<<std::endl;
// clean up the timing events
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
554751b7df985ceebc0e7d328390ea43ffc23ab4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "rocblas.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define m 6
#define n 5
int main(void){
hipblasHandle_t handle;
int i,j;
float* a;
float* x;
float* y;
hipMallocManaged(&a,m*n*sizeof(float));
hipMallocManaged(&x,m*sizeof(float));
hipMallocManaged(&y,n*sizeof(float));
int ind = 11;
float al = 2.0f;
for(j = 0; j < n; j++){
for(i = 0; i < m; i++){
a[IDX2C(i,j,m)] = (float)ind++;
}
}
for(i = 0; i < m; i++) x[i] = 1.0f;
for(i = 0; i < n; i++) y[i] = 1.0f;
hipblasCreate(&handle);
hipblasSger(handle,m,n,&al,x,1,y,1,a,m);
hipDeviceSynchronize();
printf("a after sger:\n");
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
printf("%7.0f", a[IDX2C(i,j,m)]);
}
printf("\n");
}
hipFree(a);
hipFree(x);
hipFree(y);
hipblasDestroy(handle);
return EXIT_SUCCESS;
}
| 554751b7df985ceebc0e7d328390ea43ffc23ab4.cu | #include <stdio.h>
#include "cublas_v2.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define m 6
#define n 5
int main(void){
cublasHandle_t handle;
int i,j;
float* a;
float* x;
float* y;
cudaMallocManaged(&a,m*n*sizeof(float));
cudaMallocManaged(&x,m*sizeof(float));
cudaMallocManaged(&y,n*sizeof(float));
int ind = 11;
float al = 2.0f;
for(j = 0; j < n; j++){
for(i = 0; i < m; i++){
a[IDX2C(i,j,m)] = (float)ind++;
}
}
for(i = 0; i < m; i++) x[i] = 1.0f;
for(i = 0; i < n; i++) y[i] = 1.0f;
cublasCreate(&handle);
cublasSger(handle,m,n,&al,x,1,y,1,a,m);
cudaDeviceSynchronize();
printf("a after sger:\n");
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
printf("%7.0f", a[IDX2C(i,j,m)]);
}
printf("\n");
}
cudaFree(a);
cudaFree(x);
cudaFree(y);
cublasDestroy(handle);
return EXIT_SUCCESS;
}
|
ddedd7557ccc74360ddf00b2e1d0aa0f131c9cc0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_hdm3.cuh"
__global__ void hyperdifmomsource3_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order, int ordero, real *wtemp, int field, int dim, int ii, int ii0, real dt)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
real rdx;
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ip,jp,ipg,jpg;
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
rdx=(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1));
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
//if(i<((p->n[0])) && j<((p->n[1])))
if(i<((p->n[0])) && j<((p->n[1])))
{
dwn1[fencode_hdm3(p,i,j,energy)]=wtemp[fencode_hdm3(p,i,j,tmp6)]*wd[fencode_hdm3(p,i,j,hdnur)]*wtemp[fencode_hdm3(p,i,j,tmp8)]-wtemp[fencode_hdm3(p,i,j,tmp5)]*wd[fencode_hdm3(p,i,j,hdnul)]*wtemp[fencode_hdm3(p,i,j,tmp7)]/(rdx)/2;
dwn1[fencode_hdm3(p,i,j,mom1+ii0)]=(wtemp[fencode_hdm3(p,i,j,tmp3)]*wd[fencode_hdm3(p,i,j,hdnur)]*wtemp[fencode_hdm3(p,i,j,tmp8)]-wtemp[fencode_hdm3(p,i,j,tmp2)]*wd[fencode_hdm3(p,i,j,hdnul)]*wtemp[fencode_hdm3(p,i,j,tmp7)])/(rdx)/2;
/*dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,energy)]=(
( wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i+(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)
-(wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i-(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1))/2;
dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+ii0)]=(wtemp[fencode_hdm3(p,i,j,tmp2)]*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)-wtemp[fencode_hdm3(p,i,j,tmp3)]*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1))/2;*/
/*dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,energy)]=(
( wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i+(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)
-(wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i-(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/2;
dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+ii0)]=(wtemp[fencode_hdm3(p,i,j,tmp2)]*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)-wtemp[fencode_hdm3(p,i,j,tmp3)]*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/2;*/
}
}
__syncthreads();
/* for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if( i<(ni) && j<(nj))
{
bc_periodic1_hdm3(dwn1,p,i,j,mom1+ii0);
bc_periodic1_hdm3(dwn1,p,i,j,energy);
}
}
__syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if( i<(ni) && j<(nj))
{
bc_periodic2_hdm3(dwn1,p,i,j,mom1+ii0);
bc_periodic2_hdm3(dwn1,p,i,j,energy);
}
}
__syncthreads();*/
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
// if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
if(i<((p->n[0])) && j<((p->n[1])))
{
// - sign here same as vac maybe a +
wmod[fencode_hdm3(p,i,j,mom1+ii0)+(ordero*NVAR*(p->n[0])*(p->n[1]))]=wmod[fencode_hdm3(p,i,j,mom1+ii0)+(ordero*NVAR*(p->n[0])*(p->n[1]))]+dt*dwn1[fencode_hdm3(p,i,j,mom1+ii0)];
wmod[fencode_hdm3(p,i,j,energy)+(ordero*NVAR*(p->n[0])*(p->n[1]))]=wmod[fencode_hdm3(p,i,j,energy)+(ordero*NVAR*(p->n[0])*(p->n[1]))]+dt*dwn1[fencode_hdm3(p,i,j,energy)];
}
// }
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdm3(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuhyperdifmomsource3(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real **d_wtemp, int field, int dim, int ii, int ii0, real dt)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
hipLaunchKernelGGL(( hyperdifmomsource3_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,ii,ii0,dt);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h);
//printf("called prop\n");
hipDeviceSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called boundary\n");
//hipDeviceSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called update\n");
// hipDeviceSynchronize();
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
| ddedd7557ccc74360ddf00b2e1d0aa0f131c9cc0.cu | #include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_hdm3.cuh"
__global__ void hyperdifmomsource3_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order, int ordero, real *wtemp, int field, int dim, int ii, int ii0, real dt)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
real rdx;
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ip,jp,ipg,jpg;
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
rdx=(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1));
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
//if(i<((p->n[0])) && j<((p->n[1])))
if(i<((p->n[0])) && j<((p->n[1])))
{
dwn1[fencode_hdm3(p,i,j,energy)]=wtemp[fencode_hdm3(p,i,j,tmp6)]*wd[fencode_hdm3(p,i,j,hdnur)]*wtemp[fencode_hdm3(p,i,j,tmp8)]-wtemp[fencode_hdm3(p,i,j,tmp5)]*wd[fencode_hdm3(p,i,j,hdnul)]*wtemp[fencode_hdm3(p,i,j,tmp7)]/(rdx)/2;
dwn1[fencode_hdm3(p,i,j,mom1+ii0)]=(wtemp[fencode_hdm3(p,i,j,tmp3)]*wd[fencode_hdm3(p,i,j,hdnur)]*wtemp[fencode_hdm3(p,i,j,tmp8)]-wtemp[fencode_hdm3(p,i,j,tmp2)]*wd[fencode_hdm3(p,i,j,hdnul)]*wtemp[fencode_hdm3(p,i,j,tmp7)])/(rdx)/2;
/*dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,energy)]=(
( wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i+(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)
-(wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i-(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1))/2;
dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+ii0)]=(wtemp[fencode_hdm3(p,i,j,tmp2)]*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)-wtemp[fencode_hdm3(p,i,j,tmp3)]*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1))/2;*/
/*dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,energy)]=(
( wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i+(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)
-(wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+field)]+wmod[(order*NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i-(dim==0),j+(dim==1),mom1+field)])*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/2;
dwn1[(NVAR*(p->n[0])*(p->n[1]))+fencode_hdm3(p,i,j,mom1+ii0)]=(wtemp[fencode_hdm3(p,i,j,tmp2)]*wd[fencode_hdm3(p,i,j,hdnur)]*grad1r_hdm3(wtemp,p,i,j,tmp1,dim)-wtemp[fencode_hdm3(p,i,j,tmp3)]*wd[fencode_hdm3(p,i,j,hdnul)]*grad1l_hdm3(wtemp,p,i,j,tmp1,dim))/2;*/
}
}
__syncthreads();
/* for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if( i<(ni) && j<(nj))
{
bc_periodic1_hdm3(dwn1,p,i,j,mom1+ii0);
bc_periodic1_hdm3(dwn1,p,i,j,energy);
}
}
__syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if( i<(ni) && j<(nj))
{
bc_periodic2_hdm3(dwn1,p,i,j,mom1+ii0);
bc_periodic2_hdm3(dwn1,p,i,j,energy);
}
}
__syncthreads();*/
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
// if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
if(i<((p->n[0])) && j<((p->n[1])))
{
// - sign here same as vac maybe a +
wmod[fencode_hdm3(p,i,j,mom1+ii0)+(ordero*NVAR*(p->n[0])*(p->n[1]))]=wmod[fencode_hdm3(p,i,j,mom1+ii0)+(ordero*NVAR*(p->n[0])*(p->n[1]))]+dt*dwn1[fencode_hdm3(p,i,j,mom1+ii0)];
wmod[fencode_hdm3(p,i,j,energy)+(ordero*NVAR*(p->n[0])*(p->n[1]))]=wmod[fencode_hdm3(p,i,j,energy)+(ordero*NVAR*(p->n[0])*(p->n[1]))]+dt*dwn1[fencode_hdm3(p,i,j,energy)];
}
// }
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdm3(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuhyperdifmomsource3(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real **d_wtemp, int field, int dim, int ii, int ii0, real dt)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
hyperdifmomsource3_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,ii,ii0,dt);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h);
//printf("called prop\n");
cudaThreadSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called boundary\n");
//cudaThreadSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called update\n");
// cudaThreadSynchronize();
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
|
d58177446caf55be132663749a4c43ce8d3fed46.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "shared4R40ops.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
shared4R40ops), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
shared4R40ops), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
shared4R40ops), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d58177446caf55be132663749a4c43ce8d3fed46.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "shared4R40ops.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
shared4R40ops<<<gridBlock,threadBlock>>>(A,B,C,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
shared4R40ops<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
shared4R40ops<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9d480d68bc6d68668abf27d1bfc5fe601e9e1305.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <iostream>
#include <fstream>
#include <iterator>
#include <experimental/filesystem>
#include "binFile.h"
#include "linkList.h"
namespace fs = std::experimental::filesystem;
std::string getName(std::string name) {
int ind = 0;
for (int i = 0; i < name.length(); i++) {
if (name.at(i) == '\\')
ind = i;
}
return name.substr(ind + 1, name.length());
}
void getFile(bool isEncryptedFile, binFile & file, std::string path) {
std::ifstream infile(path, std::ios::binary);
infile.seekg(0, infile.end);
int length = infile.tellg();
infile.seekg(0, infile.beg);
file.setName(getName(path));
char* temp = new char[length];
infile.read(temp, length);
if (isEncryptedFile) {
unsigned char t = temp[0];
length = 0x00000000 | (unsigned int)t << 24;
t = temp[1];
length = length | (unsigned int)t << 16;
t = temp[2];
length = length | (unsigned int)t << 8;
t = temp[3];
length = length | (unsigned int)t;
temp = &temp[4];
}
file.setFile(temp, length);
}
int getFileNum(std::string path) {
int re = 0;
for (auto& entry : fs::directory_iterator(path))
re++;
return re;
}
void getFiles(bool isEncryptedFile, binFile * &files, int fileNum, std::string path) {
files = new binFile[fileNum];
size_t ind = 0;
for (const auto& entry : fs::directory_iterator(path)) {
std::ifstream infile(entry.path(), std::ios::binary);
infile.seekg(0, infile.end);
int length = infile.tellg();
infile.seekg(0, infile.beg);
files[ind].setName(getName(entry.path().u8string()));
char* temp = new char[length];
infile.read(temp, length);
if (isEncryptedFile) {
unsigned char t = temp[0];
length = 0x00000000 | (unsigned int)t << 24;
t = temp[1];
length = length | (unsigned int)t << 16;
t = temp[2];
length = length | (unsigned int)t << 8;
t = temp[3];
length = length | (unsigned int)t;
temp = &temp[4];
}
files[ind].setFile(temp, length);
ind++;
}
}
void writeFiles(bool isEncrypt, binFile * files, int fileNum, std::string path) {
for (int i = 0; i < fileNum; i++) {
std::ofstream file;
char* temp = files[i].getFile();
file.open((path + files[i].getName()), std::ios::out | std::ios::binary);
if (isEncrypt) {
char* t = new char[4];
int s = files[i].getSize();
t[0] = s >> 24;
t[1] = (s << 8) >> 24;
t[2] = (s << 16) >> 24;
t[3] = (s << 24) >> 24;
for (int p = 0; p < 4; p++)
file << t[p];
}
for (int p = 0; p < files[i].getSize(); p++)
file << temp[p];
file.close();
}
}
void writeFile(bool isEncrypt, binFile files, std::string path) {
std::ofstream file;
file.open((path + files.getName()), std::ios::out | std::ios::binary);
char* temp = files.getFile();
if (isEncrypt) {
char* t = new char[4];
int s = files.getSize();
t[0] = s >> 24;
t[1] = (s << 8) >> 24;
t[2] = (s << 16) >> 24;
t[3] = (s << 24) >> 24;
for (int p = 0; p < 4; p++)
file << t[p];
for (int p = 0; p < files.getSize(); p++)
file << temp[p];
}
else {
for (int p = 0; p < files.getSize(); p++)
file << temp[p];
}
file.close();
}
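// Pack the two flip flags and the two 16-bit chunk IDs into a single 32-bit sequence word (flip1, ID1, flip2, ID2 from high bits to low).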
__int32 genSequence(bool flipFirst, bool flipLast, __int16 firstID, __int16 lastID)
{
int re = 0x00000000;
re = re | (unsigned int)flipFirst << 31 | (unsigned int)firstID << 16 | (unsigned int)flipLast << 15 | (unsigned int)lastID;
return re;
}
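// Build enLevel rounds of chunk-pairing words, seeded deterministically from the password string, with sLength pairs per round.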
__int32** rnGen(std::string seed, int blockNum, int enLevel, int& sLength) {
srand(1);
int s = 0;
for (int i = 0; i < seed.length(); i++)
s += (int)seed.at(i) * rand();
__int32** sequence = new __int32*[enLevel];
if (blockNum % 2 == 1)
sLength = (blockNum - 1) / 2;
else
sLength = blockNum / 2;
srand(s * rand());
for (int p = 0; p < enLevel; p++) {
sequence[p] = new __int32[sLength];
linkList* list = new linkList;
for (int i = 0; i < blockNum; i++)
list->add(i);
for (int i = 0; i < sLength; i++) {
sequence[p][i] = genSequence(rand() % 2 == 1, rand() % 2 == 1,
(__int16)list->getContent(((float)rand() / RAND_MAX) * list->getSize() + 1),
(__int16)list->getContent(((float)rand() / RAND_MAX) * list->getSize() + 1));
}
}
return sequence;
}
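// Encryption kernel: one thread block per packed sequence word; each thread XORs a byte of the first chunk with the matching byte of the second chunk, negating one operand when exactly one flip bit is set.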
__global__ void d_encrypt(char* array, __int32* sequence) {
int blockID = blockIdx.x;
__int32 s = sequence[blockID];
__int16 ID1 = threadIdx.x + ((s << 1) >> 17)*blockDim.x;
__int16 ID2 = threadIdx.x + ((s << 17) >> 17)*blockDim.x;
bool flip1 = (s >> 31);
bool flip2 = (s << 16) >> 31;
if (flip1 == flip2)
{
array[ID1] = array[ID1] ^ array[ID2];
}
else if (flip1 && !flip2)
{
array[ID1] = (~array[ID1]) ^ array[ID2];
}
else if (!flip1 && flip2)
{
array[ID1] = array[ID1] ^ (~array[ID2]);
}
}
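// Decryption kernel: inverse of d_encrypt; the negation is applied to the opposite operand so the XOR transform undoes itself.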
__global__ void d_decrypt(char* array, __int32* sequence) {
int blockID = blockIdx.x;
__int32 s = sequence[blockID];
__int16 ID1 = threadIdx.x + ((s << 1) >> 17) * blockDim.x;
__int16 ID2 = threadIdx.x + ((s << 17) >> 17) * blockDim.x;
bool flip1 = (s >> 31);
bool flip2 = (s << 16) >> 31;
if (flip1 == flip2)
{
array[ID1] = array[ID1] ^ array[ID2];
}
else if (flip1 && !flip2)
{
array[ID1] = array[ID1] ^ (~array[ID2]);
}
else if (!flip1 && flip2)
{
array[ID1] = (~array[ID1]) ^ array[ID2];
}
}
void encrypt(binFile& file, int blockLength, __int32** sequence, int enLevel, int slength) {
for (int i = 0; i < enLevel; i++) {
__int32* d_s = NULL;
char* d_bytes = NULL;
dim3 block(blockLength);
dim3 grid(slength);
hipMalloc((__int32**)& d_s, sizeof(__int32) * slength);
hipMalloc((char**)& d_bytes, sizeof(char) * file.getSize());
hipMemcpy(d_s, sequence[i], slength * sizeof(__int32), hipMemcpyHostToDevice);
hipMemcpy(d_bytes, file.getFile(), file.getSize() * sizeof(char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_encrypt) , dim3(grid), dim3(block) , 0, 0, d_bytes, d_s);
char* h_temp;
h_temp = (char*)malloc(sizeof(char) * file.getSize());
hipMemcpy(h_temp, d_bytes, file.getSize() * sizeof(char), hipMemcpyDeviceToHost);
file.setFile(h_temp, file.getSize());
hipFree(d_s);
hipFree(d_bytes);
free(h_temp);
}
}
void decrypt(binFile& file, int blockLength, __int32** sequence, int enLevel, int slength) {
for (int i = enLevel - 1; i >= 0; i--) {
__int32* d_s = NULL;
char* d_bytes = NULL;
dim3 block(blockLength);
dim3 grid(slength);
hipMalloc((__int32 **)& d_s, sizeof(__int32) * slength);
hipMalloc((char**)& d_bytes, sizeof(char) * file.getSize());
hipMemcpy(d_s, sequence[i], slength * sizeof(__int32), hipMemcpyHostToDevice);
hipMemcpy(d_bytes, file.getFile(), file.getSize() * sizeof(char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_decrypt) , dim3(grid), dim3(block) , 0, 0, d_bytes, d_s);
char* h_temp;
h_temp = (char*)malloc(sizeof(char) * file.getSize());
hipMemcpy(h_temp, d_bytes, file.getSize() * sizeof(char), hipMemcpyDeviceToHost);
file.setFile(h_temp, file.getSize());
hipFree(d_s);
hipFree(d_bytes);
free(h_temp);
}
}
void deSequence(bool& flipFirst, bool& flipLast, __int16& firstID, __int16& lastID, __int32 sequence) {
firstID = ((sequence << 1) >> 17);
lastID = ((sequence << 17) >> 17);
flipFirst = (sequence >> 31);
flipLast = (sequence << 16) >> 31;
}
void encrypt_leg(char** array, int chunkDim, __int32 sequence) {
__int16 chunk1ID, chunk2ID;
bool flip1, flip2;
deSequence(flip1, flip2, chunk1ID, chunk2ID, sequence);
char* arr1 = array[chunk1ID];
char* arr2 = array[chunk2ID];
if (flip1 == flip2)
{
for (int i = 0; i < chunkDim; i++)
arr1[i] = arr1[i] ^ arr2[i];
}
else if (flip1 && !flip2)
{
for (int i = 0; i < chunkDim; i++)
arr1[i] = (~arr1[i]) ^ arr2[i];
}
else if (!flip1 && flip2)
{
for (int i = 0; i < chunkDim; i++)
arr1[i] = arr1[i] ^ (~arr2[i]);
}
}
void decrypt_leg(char** array, int chunkDim, __int32 sequence) {
__int16 chunk1ID, chunk2ID;
bool flip1, flip2;
deSequence(flip1, flip2, chunk1ID, chunk2ID, sequence);
char* arr1 = array[chunk1ID];
char* arr2 = array[chunk2ID];
if (flip1 == flip2)
{
for (int i = chunkDim - 1; i >= 0; i--)
arr1[i] = arr1[i] ^ arr2[i];
}
else if (flip1 && !flip2)
{
for (int i = chunkDim - 1; i >= 0; i--)
arr1[i] = arr1[i] ^ (~arr2[i]);
}
else if (!flip1 && flip2)
{
for (int i = chunkDim - 1; i >= 0; i--)
arr1[i] = (~arr1[i]) ^ arr2[i];
}
}
void test()
{
std::string pass = "1";
binFile file;
getFile(false, file, "C:\\Users\\george\\Desktop\\1.txt");
int slength;
int blockDim = 2;
int enlevel = 4;
int blockNum = (file.getSize() + blockDim - 1) / blockDim;
std::cout << blockNum << std::endl;
__int32** s = rnGen(pass, blockNum, enlevel, slength);
std::cout << slength << std::endl;
char** c = new char* [blockNum];
int ind = 0;
char* f = file.getFile();
for (int i = 0; i < blockNum; i++) {
c[i] = new char[blockDim];
for (int p = 0; p < blockDim; p++) {
c[i][p] = f[ind];
ind++;
}
}
for (int i = 0; i < slength; i++)
encrypt_leg(c, blockDim, s[0][i]);
for (int i = slength - 1; i >= 0; i--)
decrypt_leg(c, blockDim, s[0][i]);
encrypt(file, blockDim, s, enlevel, slength);
decrypt(file, blockDim, s, enlevel, slength);
char* experimant = file.getFile();
char* con = new char[file.getSize()];
for (int i = 0; i < file.getSize(); i++)
con[i] = c[i / blockDim][i % blockDim];
for (int i = 0; i < slength; i++)
std::cout << std::hex << s[0][i] << std::endl;
for (int i = 0; i < file.getSize(); i++) {
// if (experimant[i] != con[i])
std::cout << "error at index: " << i << " should be [" << con[i] << "] but was <" << experimant[i] << ">" << std::endl;
}
}
int main(int argc, char** argv)
{
int blockDim = 256;
int blockNum;
using namespace std;
int isEncrypt = 1;
string pass;
string dir;
string dirOut;
int enlevel;
cout << "Do you wants to encrypt(1) or decrypt(2)?" << endl;
cin >> isEncrypt;
cout << "Enter the directory or file pass you want to en/decrypt: ";
cin >> dir;
cout << "Enter the output directory: ";
cin >> dirOut;
cout << "Enter the pass word: ";
cin >> pass;
cout << "Enter the level of the encryption: ";
cin >> enlevel;
if (fs::is_directory(dir)) {
binFile* files;
int fileNum = getFileNum(dir);
__int32*** s = new __int32** [fileNum];
if (isEncrypt == 1) {
getFiles(false, files, fileNum, dir);
for (int i = 0; i < fileNum; i++) {
blockNum = (files[i].getSize() + blockDim - 1) / blockDim;
int slength;
s[i] = rnGen(pass, blockNum, enlevel, slength);
encrypt(files[i], blockDim, s[i], enlevel, slength);
}
writeFiles(true, files, fileNum, dirOut);
}
else if (isEncrypt == 2) {
getFiles(true, files, fileNum, dir);
for (int i = 0; i < fileNum; i++) {
blockNum = (files[i].getSize() + blockDim - 1) / blockDim;
int slength;
s[i] = rnGen(pass, blockNum, enlevel, slength);
decrypt(files[i], blockDim, s[i], enlevel, slength);
}
writeFiles(false, files, fileNum, dirOut);
}
}
else {
binFile file;
int slength;
__int32** s;
if (isEncrypt == 1) {
getFile(false, file, dir);
blockNum = (file.getSize() + blockDim - 1) / blockDim;
s = rnGen(pass, blockNum, enlevel, slength);
encrypt(file, blockDim, s, enlevel, slength);
writeFile(true, file, dirOut);
}
else if (isEncrypt == 2) {
getFile(true, file, dir);
blockNum = (file.getSize() + blockDim - 1) / blockDim;
s = rnGen(pass, blockNum, enlevel, slength);
decrypt(file, blockDim, s, enlevel, slength);
writeFile(false, file, dirOut);
}
}
return(0);
}
| 9d480d68bc6d68668abf27d1bfc5fe601e9e1305.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <iostream>
#include <fstream>
#include <iterator>
#include <experimental/filesystem>
#include "binFile.h"
#include "linkList.h"
namespace fs = std::experimental::filesystem;
std::string getName(std::string name) {
int ind = 0;
for (int i = 0; i < name.length(); i++) {
if (name.at(i) == '\\')
ind = i;
}
return name.substr(ind + 1, name.length());
}
void getFile(bool isEncryptedFile, binFile & file, std::string path) {
std::ifstream infile(path, std::ios::binary);
infile.seekg(0, infile.end);
int length = infile.tellg();
infile.seekg(0, infile.beg);
file.setName(getName(path));
char* temp = new char[length];
infile.read(temp, length);
if (isEncryptedFile) {
unsigned char t = temp[0];
length = 0x00000000 | (unsigned int)t << 24;
t = temp[1];
length = length | (unsigned int)t << 16;
t = temp[2];
length = length | (unsigned int)t << 8;
t = temp[3];
length = length | (unsigned int)t;
temp = &temp[4];
}
file.setFile(temp, length);
}
int getFileNum(std::string path) {
int re = 0;
for (auto& entry : fs::directory_iterator(path))
re++;
return re;
}
void getFiles(bool isEncryptedFile, binFile * &files, int fileNum, std::string path) {
files = new binFile[fileNum];
size_t ind = 0;
for (const auto& entry : fs::directory_iterator(path)) {
std::ifstream infile(entry.path(), std::ios::binary);
infile.seekg(0, infile.end);
int length = infile.tellg();
infile.seekg(0, infile.beg);
files[ind].setName(getName(entry.path().u8string()));
char* temp = new char[length];
infile.read(temp, length);
if (isEncryptedFile) {
unsigned char t = temp[0];
length = 0x00000000 | (unsigned int)t << 24;
t = temp[1];
length = length | (unsigned int)t << 16;
t = temp[2];
length = length | (unsigned int)t << 8;
t = temp[3];
length = length | (unsigned int)t;
temp = &temp[4];
}
files[ind].setFile(temp, length);
ind++;
}
}
void writeFiles(bool isEncrypt, binFile * files, int fileNum, std::string path) {
for (int i = 0; i < fileNum; i++) {
std::ofstream file;
char* temp = files[i].getFile();
file.open((path + files[i].getName()), std::ios::out | std::ios::binary);
if (isEncrypt) {
char* t = new char[4];
int s = files[i].getSize();
t[0] = s >> 24;
t[1] = (s << 8) >> 24;
t[2] = (s << 16) >> 24;
t[3] = (s << 24) >> 24;
for (int p = 0; p < 4; p++)
file << t[p];
}
for (int p = 0; p < files[i].getSize(); p++)
file << temp[p];
file.close();
}
}
void writeFile(bool isEncrypt, binFile files, std::string path) {
std::ofstream file;
file.open((path + files.getName()), std::ios::out | std::ios::binary);
char* temp = files.getFile();
if (isEncrypt) {
char* t = new char[4];
int s = files.getSize();
t[0] = s >> 24;
t[1] = (s << 8) >> 24;
t[2] = (s << 16) >> 24;
t[3] = (s << 24) >> 24;
for (int p = 0; p < 4; p++)
file << t[p];
for (int p = 0; p < files.getSize(); p++)
file << temp[p];
}
else {
for (int p = 0; p < files.getSize(); p++)
file << temp[p];
}
file.close();
}
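// genSequence (below) packs one swap instruction into a single 32-bit word:
//   bit 31      -> flipFirst flag
//   bits 30..16 -> firstID  (chunk index, assumed to fit in 15 bits)
//   bit 15      -> flipLast flag
//   bits 14..0  -> lastID   (chunk index, assumed to fit in 15 bits)
// deSequence() and the device kernels unpack the word with the matching shifts.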
__int32 genSequence(bool flipFirst, bool flipLast, __int16 firstID, __int16 lastID)
{
int re = 0x00000000;
re = re | (unsigned int)flipFirst << 31 | (unsigned int)firstID << 16 | (unsigned int)flipLast << 15 | (unsigned int)lastID;
return re;
}
__int32** rnGen(std::string seed, int blockNum, int enLevel, int& sLength) {
srand(1);
int s = 0;
for (int i = 0; i < seed.length(); i++)
s += (int)seed.at(i) * rand();
__int32** sequence = new __int32*[enLevel];
if (blockNum % 2 == 1)
sLength = (blockNum - 1) / 2;
else
sLength = blockNum / 2;
srand(s * rand());
for (int p = 0; p < enLevel; p++) {
sequence[p] = new __int32[sLength];
linkList* list = new linkList;
for (int i = 0; i < blockNum; i++)
list->add(i);
for (int i = 0; i < sLength; i++) {
sequence[p][i] = genSequence(rand() % 2 == 1, rand() % 2 == 1,
(__int16)list->getContent(((float)rand() / RAND_MAX) * list->getSize() + 1),
(__int16)list->getContent(((float)rand() / RAND_MAX) * list->getSize() + 1));
}
}
return sequence;
}
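// d_encrypt: one block handles one packed sequence word. Thread t XORs byte t of
// chunk firstID with byte t of chunk lastID, optionally complementing one operand
// according to the two flip flags; d_decrypt below applies the mirrored operation,
// so decrypting with the same sequence restores the original bytes.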
__global__ void d_encrypt(char* array, __int32* sequence) {
int blockID = blockIdx.x;
__int32 s = sequence[blockID];
	int ID1 = threadIdx.x + ((s << 1) >> 17)*blockDim.x;  // byte index into the file; int so large files do not overflow 16 bits
	int ID2 = threadIdx.x + ((s << 17) >> 17)*blockDim.x;
bool flip1 = (s >> 31);
bool flip2 = (s << 16) >> 31;
if (flip1 == flip2)
{
array[ID1] = array[ID1] ^ array[ID2];
}
else if (flip1 && !flip2)
{
array[ID1] = (~array[ID1]) ^ array[ID2];
}
else if (!flip1 && flip2)
{
array[ID1] = array[ID1] ^ (~array[ID2]);
}
}
__global__ void d_decrypt(char* array, __int32* sequence) {
int blockID = blockIdx.x;
__int32 s = sequence[blockID];
	int ID1 = threadIdx.x + ((s << 1) >> 17) * blockDim.x;  // byte index into the file; int so large files do not overflow 16 bits
	int ID2 = threadIdx.x + ((s << 17) >> 17) * blockDim.x;
bool flip1 = (s >> 31);
bool flip2 = (s << 16) >> 31;
if (flip1 == flip2)
{
array[ID1] = array[ID1] ^ array[ID2];
}
else if (flip1 && !flip2)
{
array[ID1] = array[ID1] ^ (~array[ID2]);
}
else if (!flip1 && flip2)
{
array[ID1] = (~array[ID1]) ^ array[ID2];
}
}
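// Host wrappers: for every encryption level the whole file and that level's sequence
// are copied to the device, the kernel runs with one block per chunk pair
// (grid = slength) and one thread per byte of a chunk (block = blockLength, the chunk
// size), and the transformed bytes are copied back into the binFile.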
void encrypt(binFile& file, int blockLength, __int32** sequence, int enLevel, int slength) {
for (int i = 0; i < enLevel; i++) {
__int32* d_s = NULL;
char* d_bytes = NULL;
dim3 block(blockLength);
dim3 grid(slength);
cudaMalloc((__int32**)& d_s, sizeof(__int32) * slength);
cudaMalloc((char**)& d_bytes, sizeof(char) * file.getSize());
cudaMemcpy(d_s, sequence[i], slength * sizeof(__int32), cudaMemcpyHostToDevice);
cudaMemcpy(d_bytes, file.getFile(), file.getSize() * sizeof(char), cudaMemcpyHostToDevice);
d_encrypt <<< grid, block >>> (d_bytes, d_s);
char* h_temp;
h_temp = (char*)malloc(sizeof(char) * file.getSize());
cudaMemcpy(h_temp, d_bytes, file.getSize() * sizeof(char), cudaMemcpyDeviceToHost);
file.setFile(h_temp, file.getSize());
		cudaFree(d_s);
		cudaFree(d_bytes);
free(h_temp);
}
}
void decrypt(binFile& file, int blockLength, __int32** sequence, int enLevel, int slength) {
for (int i = enLevel - 1; i >= 0; i--) {
__int32* d_s = NULL;
char* d_bytes = NULL;
dim3 block(blockLength);
dim3 grid(slength);
cudaMalloc((__int32 **)& d_s, sizeof(__int32) * slength);
cudaMalloc((char**)& d_bytes, sizeof(char) * file.getSize());
cudaMemcpy(d_s, sequence[i], slength * sizeof(__int32), cudaMemcpyHostToDevice);
cudaMemcpy(d_bytes, file.getFile(), file.getSize() * sizeof(char), cudaMemcpyHostToDevice);
d_decrypt <<< grid, block >>> (d_bytes, d_s);
char* h_temp;
h_temp = (char*)malloc(sizeof(char) * file.getSize());
cudaMemcpy(h_temp, d_bytes, file.getSize() * sizeof(char), cudaMemcpyDeviceToHost);
file.setFile(h_temp, file.getSize());
		cudaFree(d_s);
		cudaFree(d_bytes);
free(h_temp);
}
}
void deSequence(bool& flipFirst, bool& flipLast, __int16& firstID, __int16& lastID, __int32 sequence) {
firstID = ((sequence << 1) >> 17);
lastID = ((sequence << 17) >> 17);
flipFirst = (sequence >> 31);
flipLast = (sequence << 16) >> 31;
}
void encrypt_leg(char** array, int chunkDim, __int32 sequence) {
__int16 chunk1ID, chunk2ID;
bool flip1, flip2;
deSequence(flip1, flip2, chunk1ID, chunk2ID, sequence);
char* arr1 = array[chunk1ID];
char* arr2 = array[chunk2ID];
if (flip1 == flip2)
{
for (int i = 0; i < chunkDim; i++)
arr1[i] = arr1[i] ^ arr2[i];
}
else if (flip1 && !flip2)
{
for (int i = 0; i < chunkDim; i++)
arr1[i] = (~arr1[i]) ^ arr2[i];
}
else if (!flip1 && flip2)
{
for (int i = 0; i < chunkDim; i++)
arr1[i] = arr1[i] ^ (~arr2[i]);
}
}
void decrypt_leg(char** array, int chunkDim, __int32 sequence) {
__int16 chunk1ID, chunk2ID;
bool flip1, flip2;
deSequence(flip1, flip2, chunk1ID, chunk2ID, sequence);
char* arr1 = array[chunk1ID];
char* arr2 = array[chunk2ID];
if (flip1 == flip2)
{
for (int i = chunkDim - 1; i >= 0; i--)
arr1[i] = arr1[i] ^ arr2[i];
}
else if (flip1 && !flip2)
{
for (int i = chunkDim - 1; i >= 0; i--)
arr1[i] = arr1[i] ^ (~arr2[i]);
}
else if (!flip1 && flip2)
{
for (int i = chunkDim - 1; i >= 0; i--)
arr1[i] = (~arr1[i]) ^ arr2[i];
}
}
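// test(): small self-check that runs the scalar reference (encrypt_leg/decrypt_leg)
// on host-side chunks and the CUDA path on the same file, then prints the bytes of
// both results for manual comparison.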
void test()
{
std::string pass = "1";
binFile file;
getFile(false, file, "C:\\Users\\george\\Desktop\\1.txt");
int slength;
int blockDim = 2;
int enlevel = 4;
int blockNum = (file.getSize() + blockDim - 1) / blockDim;
std::cout << blockNum << std::endl;
__int32** s = rnGen(pass, blockNum, enlevel, slength);
std::cout << slength << std::endl;
char** c = new char* [blockNum];
int ind = 0;
char* f = file.getFile();
for (int i = 0; i < blockNum; i++) {
c[i] = new char[blockDim];
for (int p = 0; p < blockDim; p++) {
c[i][p] = f[ind];
ind++;
}
}
for (int i = 0; i < slength; i++)
encrypt_leg(c, blockDim, s[0][i]);
for (int i = slength - 1; i >= 0; i--)
decrypt_leg(c, blockDim, s[0][i]);
encrypt(file, blockDim, s, enlevel, slength);
decrypt(file, blockDim, s, enlevel, slength);
char* experimant = file.getFile();
char* con = new char[file.getSize()];
for (int i = 0; i < file.getSize(); i++)
con[i] = c[i / blockDim][i % blockDim];
for (int i = 0; i < slength; i++)
std::cout << std::hex << s[0][i] << std::endl;
for (int i = 0; i < file.getSize(); i++) {
// if (experimant[i] != con[i])
std::cout << "error at index: " << i << " should be [" << con[i] << "] but was <" << experimant[i] << ">" << std::endl;
}
}
int main(int argc, char** argv)
{
int blockDim = 256;
int blockNum;
using namespace std;
int isEncrypt = 1;
string pass;
string dir;
string dirOut;
int enlevel;
cout << "Do you wants to encrypt(1) or decrypt(2)?" << endl;
cin >> isEncrypt;
cout << "Enter the directory or file pass you want to en/decrypt: ";
cin >> dir;
cout << "Enter the output directory: ";
cin >> dirOut;
cout << "Enter the pass word: ";
cin >> pass;
cout << "Enter the level of the encryption: ";
cin >> enlevel;
if (fs::is_directory(dir)) {
binFile* files;
int fileNum = getFileNum(dir);
__int32*** s = new __int32** [fileNum];
if (isEncrypt == 1) {
getFiles(false, files, fileNum, dir);
for (int i = 0; i < fileNum; i++) {
blockNum = (files[i].getSize() + blockDim - 1) / blockDim;
int slength;
s[i] = rnGen(pass, blockNum, enlevel, slength);
			encrypt(files[i], blockDim, s[i], enlevel, slength); // blockDim = chunk size = threads per block
}
writeFiles(true, files, fileNum, dirOut);
}
else if (isEncrypt == 2) {
getFiles(true, files, fileNum, dir);
for (int i = 0; i < fileNum; i++) {
blockNum = (files[i].getSize() + blockDim - 1) / blockDim;
int slength;
s[i] = rnGen(pass, blockNum, enlevel, slength);
			decrypt(files[i], blockDim, s[i], enlevel, slength);
}
writeFiles(false, files, fileNum, dirOut);
}
}
else {
binFile file;
int slength;
__int32** s;
if (isEncrypt == 1) {
getFile(false, file, dir);
blockNum = (file.getSize() + blockDim - 1) / blockDim;
s = rnGen(pass, blockNum, enlevel, slength);
			encrypt(file, blockDim, s, enlevel, slength);
writeFile(true, file, dirOut);
}
else if (isEncrypt == 2) {
getFile(true, file, dir);
blockNum = (file.getSize() + blockDim - 1) / blockDim;
s = rnGen(pass, blockNum, enlevel, slength);
			decrypt(file, blockDim, s, enlevel, slength);
writeFile(false, file, dirOut);
}
}
return(0);
}
|
8eb20af97a0c495bafd74dbcd3769ce449e23c3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_subs.h"
hipDoubleComplex *dev_rhophon;
hipDoubleComplex *dev_rhotot;
hipDoubleComplex *dev_rhonew;
hipDoubleComplex *dev_rhoaux;
hipDoubleComplex *dev_Drho;
hipDoubleComplex *dev_Htot1;
hipDoubleComplex *dev_Htot2;
hipDoubleComplex *dev_Htot3;
hipDoubleComplex *dev_mutot;
hipDoubleComplex *dev_dvdx;
double *dev_vbath;
double *dev_fb;
double *dev_xi;
double *dev_vi;
double *dev_ki;
double *dev_xf;
double *dev_vf;
double *dev_xh;
UNINT Ncores1;
UNINT Ncores2;
//##############################################################################
// This function build the Hamiltonian without CEED:
// H_in = H_e + H_phon + H_e-phon - This matrix is the same always.
// H_out = H_in + E * mu + V_bath
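// Each thread owns one element ind of the dense n_tot x n_tot matrix: i1 = ind / n_tot
// is the slower-varying index and i_e = i1 / (n_phon*np_levels) the electronic block it
// belongs to. The field term E*mu and the bath term sum_xi*v_bath are added to every
// element, while diagonal elements (ind == i1*(n_tot+1)) also pick up fb[i_e]*sum_xi.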
__global__ void update_H_tot(hipDoubleComplex *H_out, hipDoubleComplex *H_in,
hipDoubleComplex *mu_tot,
double *v_bath_mat, double *fb_vec,
double sum_xi, double Efield, int n_el,
int n_phon, int np_levels, int n_tot){
hipDoubleComplex aux1;
hipDoubleComplex aux2;
hipDoubleComplex aux3;
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int dim2 = n_tot * n_tot;
if (ind < dim2){
int i1 = ind / n_tot;
int i_e = i1 / (n_phon*np_levels);
H_out[ind] = H_in[ind];
if ( ind == i1 + i1*n_tot ){
aux1 = make_cuDoubleComplex(fb_vec[i_e] * sum_xi, 0.0e0);
H_out[ind] = cuCadd(H_out[ind],aux1);
}
aux1 = make_cuDoubleComplex(Efield, 0.0e0);
aux1 = cuCmul(aux1, mu_tot[ind]);
aux2 = make_cuDoubleComplex(sum_xi*v_bath_mat[ind], 0.0e0);
aux3 = cuCadd(aux1,aux2);
H_out[ind] = cuCadd(H_out[ind], aux3);
}
return;
}
//##############################################################################
//This function extract the diagonal terms of the matrix matA in vecA
__global__ void get_diag(hipDoubleComplex *matA, hipDoubleComplex *vecA,
int n_tot){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int dim2 = n_tot * n_tot;
int i1 = ind / n_tot;
if ((ind == i1 + i1*n_tot) && (ind < dim2)){
vecA[i1] = matA[ind];
}
return;
}
//##############################################################################
__global__ void build_rhophon(hipDoubleComplex *rho_tot,
hipDoubleComplex *rho_phon, int n_el ,int n_phon,
int np_levels, int n_tot){
int ind1 = threadIdx.x + blockIdx.x * blockDim.x;
int dim1 = n_phon * np_levels;
int dim2 = dim1 * dim1;
if (ind1 < dim2){
int jj = ind1/dim1;
int ii = ind1 - jj * dim1;
rho_phon[ind1] = make_cuDoubleComplex(0.0e0, 0.0e0);
for (int kk=0; kk<n_el; kk++){
int ind2 = (ii + kk * dim1) + (jj + kk * dim1) * n_tot;
rho_phon[ind1] = cuCadd(rho_tot[ind2], rho_phon[ind1]);
}
}
return;
}
//##############################################################################
__global__ void move_x(double *xi_vec, double *vi_vec, double *xf_vec,
double dt, int n_bath){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
if (ind < n_bath){
xf_vec[ind] = xi_vec[ind] + vi_vec[ind] * dt;
}
return;
}
//##############################################################################
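// Block-wise tree reduction: each block accumulates its Nthreads values in shared
// memory, halving the active stride every step (Nthreads must be a power of two),
// and writes one partial sum per block into sum_vec; the host adds up the partials.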
__global__ void get_partial_sum(double *xi_vec, double *sum_vec, int n_bath){
__shared__ double cache[Nthreads];
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
cache[cacheIndex] = 0.0e0;
if (ind < n_bath){
cache[cacheIndex] = xi_vec[ind];
__syncthreads();
int ii = blockDim.x/2;
while (ii != 0) {
if (cacheIndex < ii){
cache[cacheIndex] += cache[cacheIndex + ii];
}
__syncthreads();
ii /= 2;
}
if (cacheIndex == 0){
sum_vec[blockIdx.x] = cache[0];
}
}
return;
}
//##############################################################################
__global__ void get_partial_Ek(double *vi_vec, double *sum_vec, int n_bath){
__shared__ double cache[Nthreads];
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
cache[cacheIndex] = 0.0e0;
if (ind < n_bath){
cache[cacheIndex] = vi_vec[ind]*vi_vec[ind];
__syncthreads();
int ii = blockDim.x/2;
while (ii != 0) {
if (cacheIndex < ii){
cache[cacheIndex] += cache[cacheIndex + ii];
}
__syncthreads();
ii /= 2;
}
if (cacheIndex == 0){
sum_vec[blockIdx.x] = cache[0];
}
}
return;
}
//##############################################################################
__global__ void move_v(double *xi_vec, double *vi_vec, double *ki_vec,
double *vf_vec, double qforce, double dt,
int n_bath){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
if (ind < n_bath){
double ai = - ki_vec[ind] * xi_vec[ind] + qforce;
vf_vec[ind] = vi_vec[ind] + ai * dt;
}
}
//##############################################################################
__global__ void update_mat(hipDoubleComplex *matA, hipDoubleComplex *matB,
int dim){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int dim2 = dim * dim;
if (ind < dim2){
matA[ind] = matB[ind];
}
return;
}
//##############################################################################
__global__ void update_vec(double *vecA, double *vecB, int dim){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
if (ind < dim){
vecA[ind] = vecB[ind];
}
return;
}
//##############################################################################
void init_cuda(complex<double> *H_tot, complex<double> *mu_tot,
double *v_bath_mat, double *fb_vec, double *xi_vec,
double *vi_vec, double *ki_vec,
complex<double> *rho_tot,
complex<double> *rho_phon, complex<double> *dVdX_mat,
UNINT n_el, UNINT n_phon, UNINT np_levels, UNINT n_tot,
UNINT n_bath){
double gaux = (double) (n_tot*n_tot);
double taux = (double) Nthreads;
Ncores1 = (UNINT) ceil(gaux/taux);
gaux = (double) (n_bath);
Ncores2 = (UNINT) ceil(gaux/taux);
int dimaux = n_phon * n_phon * np_levels * np_levels;
hipMalloc((void**) &dev_Htot1 , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_Htot2 , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_Htot3 , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_mutot , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_rhotot , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_rhonew , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_rhoaux , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_Drho , n_tot*n_tot*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_rhophon, dimaux*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_dvdx , dimaux*sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_vbath , n_tot*n_tot*sizeof(double));
hipMalloc((void**) &dev_fb , n_el*sizeof(double));
hipMalloc((void**) &dev_xi , n_bath*sizeof(double));
hipMalloc((void**) &dev_vi , n_bath*sizeof(double));
hipMalloc((void**) &dev_ki , n_bath*sizeof(double));
hipMalloc((void**) &dev_xf , n_bath*sizeof(double));
hipMalloc((void**) &dev_vf , n_bath*sizeof(double));
hipMalloc((void**) &dev_xh , n_bath*sizeof(double));
hipMemcpy(dev_Htot1, H_tot, n_tot*n_tot*sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_mutot, mu_tot, n_tot*n_tot*sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_rhotot, rho_tot, n_tot*n_tot*sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_rhophon, rho_phon, dimaux*sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_dvdx, dVdX_mat, dimaux*sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_vbath, v_bath_mat, n_tot*n_tot*sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(dev_fb, fb_vec, n_el*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_xi, xi_vec, n_bath*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_vi, vi_vec, n_bath*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_ki, ki_vec, n_bath*sizeof(double), hipMemcpyHostToDevice);
return;
}
//##############################################################################
void free_cuda_memory(){
hipFree(dev_Htot1);
hipFree(dev_Htot2);
hipFree(dev_Htot3);
hipFree(dev_mutot);
hipFree(dev_rhotot);
hipFree(dev_rhonew);
hipFree(dev_rhoaux);
hipFree(dev_Drho);
hipFree(dev_rhophon);
hipFree(dev_dvdx);
hipFree(dev_vbath);
hipFree(dev_fb);
hipFree(dev_ki);
hipFree(dev_xi);
hipFree(dev_xf);
hipFree(dev_xh);
hipFree(dev_vi);
hipFree(dev_vf);
return;
}
//##############################################################################
void matmul_cublas(hipDoubleComplex *dev_A, hipDoubleComplex *dev_B,
hipDoubleComplex *dev_C, int dim){
const hipDoubleComplex alf = make_cuDoubleComplex(1.0,0.0);
const hipDoubleComplex bet = make_cuDoubleComplex(0.0, 0.0);
const hipDoubleComplex *alpha = &alf;
const hipDoubleComplex *beta = &bet;
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// Do the actual multiplication
hipblasZgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, dev_A,
dim, dev_B, dim, beta, dev_C, dim);
// Destroy the handle
hipblasDestroy(handle);
return;
}
//##############################################################################
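// commute_cuda returns dev_C = alf*(A.B - B.A): the first Zgemm stores alf*B.A in C
// (beta = 0) and the second overwrites it with alf*A.B - C (beta = -1).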
void commute_cuda(hipDoubleComplex *dev_A, hipDoubleComplex *dev_B,
hipDoubleComplex *dev_C, int dim, const hipDoubleComplex alf){
const hipDoubleComplex bet1 = make_cuDoubleComplex(0.0, 0.0);
const hipDoubleComplex bet2 = make_cuDoubleComplex(-1.0, 0.0);
const hipDoubleComplex *alpha = &alf;
const hipDoubleComplex *beta1 = &bet1;
const hipDoubleComplex *beta2 = &bet2;
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// Computing B.A
hipblasZgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, dev_B,
dim, dev_A, dim, beta1, dev_C, dim);
// Computing A.B - B.A
hipblasZgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, dev_A,
dim, dev_B, dim, beta2, dev_C, dim);
// Destroy the handle
hipblasDestroy(handle);
return;
}
//##############################################################################
void matadd_cublas(hipDoubleComplex *dev_A, hipDoubleComplex *dev_B,
hipDoubleComplex *dev_C, int dim, const hipDoubleComplex alf,
const hipDoubleComplex bet){
const hipDoubleComplex *alpha = &alf;
const hipDoubleComplex *beta = &bet;
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// Do the actual multiplication
hipblasZgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, alpha, dev_A,
dim, beta ,dev_B, dim, dev_C, dim);
// Destroy the handle
hipblasDestroy(handle);
return;
}
//##############################################################################
double get_trace_cuda(hipDoubleComplex *dev_A, UNINT dim){
// hipDoubleComplex aux1= make_cuDoubleComplex(0.0e0, 0.0e0);
complex<double> aux1;
double aux2;
complex<double> aux_vec[dim];
hipDoubleComplex *dev_vec;
hipMalloc((void**) &dev_vec, dim * sizeof(hipDoubleComplex));
hipLaunchKernelGGL(( get_diag), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_A, dev_vec, dim);
hipMemcpy(aux_vec, dev_vec, dim*sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost);
for(int ii=0;ii<dim;ii++){
aux1 += aux_vec[ii];
}
aux2 = aux1.real();
hipFree(dev_vec);
return aux2;
}
//##############################################################################
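// include_Hceed_cuda: builds Hceed = [mu, H_in], evaluates
// dmu2 = Re Tr( rho . [[mu, H_in], H_in] ) and returns
// H_out = H_in + i*a_ceed*dmu2*Hceed.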
void include_Hceed_cuda(hipDoubleComplex *dev_Hout, hipDoubleComplex *dev_Hin,
hipDoubleComplex *dev_mu, hipDoubleComplex *dev_rhoin,
double a_ceed, int n_tot){
int dim2 = n_tot*n_tot;
double dmu2;
hipDoubleComplex *dev_aux1, *dev_aux2, *dev_Hceed;
const hipDoubleComplex alf = make_cuDoubleComplex(1.0,0.0);
hipMalloc((void**) &dev_aux1, dim2 * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_aux2, dim2 * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_Hceed, dim2 * sizeof(hipDoubleComplex));
commute_cuda(dev_mu, dev_Hin, dev_Hceed, n_tot, alf);
commute_cuda(dev_Hceed, dev_Hin, dev_aux1, n_tot, alf);
matmul_cublas(dev_rhoin, dev_aux1, dev_aux2, n_tot);
dmu2 = get_trace_cuda(dev_aux2, n_tot);
const hipDoubleComplex bet = make_cuDoubleComplex(0.0, a_ceed*dmu2);
matadd_cublas(dev_Hin, dev_Hceed, dev_Hout, n_tot, alf, bet);
hipFree(dev_aux1);
hipFree(dev_aux2);
hipFree(dev_Hceed);
return;
}
//##############################################################################
double get_Qforces_cuda(hipDoubleComplex *dev_rhoin ,double *fb_vec,
UNINT n_el, UNINT n_phon, UNINT np_levels, UNINT n_tot){
UNINT dim1 = np_levels * n_phon;
UNINT dim2 = dim1 * dim1;
double qforce = 0.0e0;
complex<double> aux_vec[n_tot];
hipDoubleComplex *dev_vec, *dev_mat;
hipMalloc((void**) &dev_vec, n_tot * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_mat, dim2 * sizeof(hipDoubleComplex));
hipLaunchKernelGGL(( get_diag), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_rhoin, dev_vec, n_tot);
hipMemcpy(aux_vec, dev_vec, n_tot*sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost);
for (int kk=0; kk<n_el; kk++){
for (int ii=0; ii<dim1; ii++){
qforce += -aux_vec[ii+kk*dim1].real() * fb_vec[kk];
}
}
hipLaunchKernelGGL(( build_rhophon), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_rhoin, dev_rhophon, n_el , n_phon,
np_levels, n_tot);
matmul_cublas(dev_rhophon, dev_dvdx, dev_mat, dim1);
hipLaunchKernelGGL(( get_diag), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_mat, dev_vec, dim1);
hipMemcpy(aux_vec, dev_vec, n_tot*sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost);
for (int ii=0; ii<dim1; ii++){
qforce += -aux_vec[ii].real();
}
hipFree(dev_vec);
hipFree(dev_mat);
return qforce;
}
//##############################################################################
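// Midpoint (second-order Runge-Kutta) step: rho, x and v are first advanced by dt/2
// with H(t) (including the CEED correction) and the quantum force from rho(t); the
// Hamiltonian and force are then rebuilt from these midpoint quantities and used for
// the full-dt update of the density matrix (Liouville-von Neumann) and of the bath.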
void runge_kutta_propagator_cuda(double mass_bath, double a_ceed, double dt,
double Efield, double Efieldaux,
double *fb_vec, int tt, UNINT n_el,
UNINT n_phon, UNINT np_levels,
UNINT n_tot, UNINT n_bath){
const hipDoubleComplex alf1 = make_cuDoubleComplex(0.0e0, -0.5*dt);
const hipDoubleComplex alf2 = make_cuDoubleComplex(0.0e0, -dt);
const hipDoubleComplex alf3 = make_cuDoubleComplex(1.0e0, 0.0e0);
double *dev_partialvec;
double partialvec[Ncores2];
double sum_xi;
double dth = 0.5e0 * dt;
double qforce;
//double time = dt * tt;
hipMalloc((void**) &dev_partialvec, Ncores2*sizeof(double));
//Calculating the sum of all the coordintes of the bath----------------------
hipLaunchKernelGGL(( get_partial_sum), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_xi, dev_partialvec, n_bath);
hipMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
hipMemcpyDeviceToHost);
sum_xi = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
sum_xi += partialvec[ii];
}
//---------------------------------------------------------------------------
//Efield_t = Efield * exp(-pow(((time-10.0)/0.2),2.0));
//Building the new Hamiltonian at time = t ----------------------------------
hipLaunchKernelGGL(( update_H_tot), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_Htot2, dev_Htot1, dev_mutot,
dev_vbath, dev_fb, sum_xi, Efield,
n_el, n_phon, np_levels, n_tot);
//Including CEED Hamiltonian:
include_Hceed_cuda(dev_Htot3, dev_Htot2, dev_mutot, dev_rhotot, a_ceed,
n_tot);
//---------------------------------------------------------------------------
//Calculating rho(t+dt/2) using LvN------------------------------------------
commute_cuda(dev_Htot3, dev_rhotot, dev_Drho, n_tot, alf3);
matadd_cublas(dev_rhotot, dev_Drho, dev_rhoaux, n_tot, alf3, alf1);
//---------------------------------------------------------------------------
//Calculating x(t+dt/2) and v(t+dt/2) using the Quantum forces --------------
qforce = get_Qforces_cuda(dev_rhotot , fb_vec, n_el, n_phon, np_levels,
n_tot);
qforce = qforce/mass_bath;
hipLaunchKernelGGL(( move_x), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_xi, dev_vi, dev_xh, dth, n_bath);
hipLaunchKernelGGL(( move_v), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_xi, dev_vi, dev_ki, dev_vf, qforce, dth,
n_bath);
//---------------------------------------------------------------------------
//Hencefort we repeat everything to obtain everything in t + dt -------------
hipLaunchKernelGGL(( get_partial_sum), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_xh, dev_partialvec, n_bath);
hipMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
hipMemcpyDeviceToHost);
sum_xi = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
sum_xi += partialvec[ii];
}
//Efield_t = Efield * exp(-pow(((time+dth-10.0)/0.2),2.0));
hipLaunchKernelGGL(( update_H_tot), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_Htot2, dev_Htot1, dev_mutot,
dev_vbath, dev_fb, sum_xi, Efieldaux,
n_el, n_phon, np_levels, n_tot);
include_Hceed_cuda(dev_Htot3, dev_Htot2, dev_mutot, dev_rhoaux, a_ceed,
n_tot);
commute_cuda(dev_Htot3, dev_rhoaux, dev_Drho, n_tot, alf3);
matadd_cublas(dev_rhotot, dev_Drho, dev_rhonew, n_tot, alf3, alf2);
qforce = get_Qforces_cuda(dev_rhoaux , fb_vec, n_el, n_phon, np_levels,
n_tot);
qforce = qforce/mass_bath;
hipLaunchKernelGGL(( move_x), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_xi, dev_vf, dev_xf, dt, n_bath);
hipLaunchKernelGGL(( move_v), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_xh, dev_vi, dev_ki, dev_vf, qforce, dt,
n_bath);
//---------------------------------------------------------------------------
//We update rho, x and v:
hipLaunchKernelGGL(( update_mat), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_rhotot, dev_rhonew, n_tot);
hipLaunchKernelGGL(( update_vec), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_xi, dev_xf, n_bath);
hipLaunchKernelGGL(( update_vec), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_vi, dev_vf, n_bath);
hipFree(dev_partialvec);
return;
}
//##############################################################################
void calcrhophon(hipDoubleComplex *dev_rhoin, int n_el, int n_phon,
int np_levels, int n_tot){
hipLaunchKernelGGL(( build_rhophon), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_rhoin, dev_rhophon, n_el , n_phon,
np_levels, n_tot);
return;
}
//##############################################################################
void getingmat(complex<double> *matA, hipDoubleComplex *dev_A, int n_tot){
hipMemcpy(matA, dev_A, n_tot*n_tot*sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost);
return;
}
//##############################################################################
// xi_tot: summed bath output written below; assumed to be part of the interface
// declared in cuda_subs.h together with the other output pointers.
void getting_printing_info(double *Ener, double *mu, complex<double> *tr_rho,
                           double *Ek_bath, double *xi_tot,
                           complex<double> *rho_tot,
                           UNINT n_tot, UNINT n_bath){
int dim2 = n_tot * n_tot;
hipDoubleComplex *dev_aux1;
hipDoubleComplex *dev_vec;
double *dev_partialvec;
double partialvec[Ncores2];
hipMalloc((void**) &dev_aux1, dim2 * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_vec, n_tot * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_partialvec, Ncores2*sizeof(double));
matmul_cublas(dev_rhotot, dev_Htot1, dev_aux1, n_tot);
*Ener = get_trace_cuda(dev_aux1, n_tot);
matmul_cublas(dev_rhotot, dev_mutot, dev_aux1, n_tot);
*mu = get_trace_cuda(dev_aux1, n_tot);
hipLaunchKernelGGL(( get_diag), dim3(Ncores1), dim3(Nthreads), 0, 0, dev_rhotot, dev_vec, n_tot);
hipMemcpy(tr_rho, dev_vec, n_tot*sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( get_partial_Ek), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_vi, dev_partialvec, n_bath);
hipMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
hipMemcpyDeviceToHost);
*Ek_bath = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
*Ek_bath += 0.5e0 * partialvec[ii];
}
hipLaunchKernelGGL(( get_partial_sum), dim3(Ncores2), dim3(Nthreads), 0, 0, dev_vi, dev_partialvec, n_bath);
hipMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
hipMemcpyDeviceToHost);
*xi_tot = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
*xi_tot += partialvec[ii];
}
hipMemcpy(rho_tot, dev_rhotot, n_tot*n_tot*sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost);
hipFree(dev_vec);
hipFree(dev_aux1);
hipFree(dev_partialvec);
return;
}
//##############################################################################
| 8eb20af97a0c495bafd74dbcd3769ce449e23c3a.cu | #include "cuda_subs.h"
cuDoubleComplex *dev_rhophon;
cuDoubleComplex *dev_rhotot;
cuDoubleComplex *dev_rhonew;
cuDoubleComplex *dev_rhoaux;
cuDoubleComplex *dev_Drho;
cuDoubleComplex *dev_Htot1;
cuDoubleComplex *dev_Htot2;
cuDoubleComplex *dev_Htot3;
cuDoubleComplex *dev_mutot;
cuDoubleComplex *dev_dvdx;
double *dev_vbath;
double *dev_fb;
double *dev_xi;
double *dev_vi;
double *dev_ki;
double *dev_xf;
double *dev_vf;
double *dev_xh;
UNINT Ncores1;
UNINT Ncores2;
//##############################################################################
// This function build the Hamiltonian without CEED:
// H_in = H_e + H_phon + H_e-phon - This matrix is the same always.
// H_out = H_in + E * mu + V_bath
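// One thread per matrix element: i1 = ind / n_tot is the slower-varying index and
// i_e its electronic block. E*mu and sum_xi*v_bath enter every element; the diagonal
// (ind == i1*(n_tot+1)) additionally receives the bath shift fb[i_e]*sum_xi.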
__global__ void update_H_tot(cuDoubleComplex *H_out, cuDoubleComplex *H_in,
cuDoubleComplex *mu_tot,
double *v_bath_mat, double *fb_vec,
double sum_xi, double Efield, int n_el,
int n_phon, int np_levels, int n_tot){
cuDoubleComplex aux1;
cuDoubleComplex aux2;
cuDoubleComplex aux3;
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int dim2 = n_tot * n_tot;
if (ind < dim2){
int i1 = ind / n_tot;
int i_e = i1 / (n_phon*np_levels);
H_out[ind] = H_in[ind];
if ( ind == i1 + i1*n_tot ){
aux1 = make_cuDoubleComplex(fb_vec[i_e] * sum_xi, 0.0e0);
H_out[ind] = cuCadd(H_out[ind],aux1);
}
aux1 = make_cuDoubleComplex(Efield, 0.0e0);
aux1 = cuCmul(aux1, mu_tot[ind]);
aux2 = make_cuDoubleComplex(sum_xi*v_bath_mat[ind], 0.0e0);
aux3 = cuCadd(aux1,aux2);
H_out[ind] = cuCadd(H_out[ind], aux3);
}
return;
}
//##############################################################################
//This function extract the diagonal terms of the matrix matA in vecA
__global__ void get_diag(cuDoubleComplex *matA, cuDoubleComplex *vecA,
int n_tot){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int dim2 = n_tot * n_tot;
int i1 = ind / n_tot;
if ((ind == i1 + i1*n_tot) && (ind < dim2)){
vecA[i1] = matA[ind];
}
return;
}
//##############################################################################
__global__ void build_rhophon(cuDoubleComplex *rho_tot,
cuDoubleComplex *rho_phon, int n_el ,int n_phon,
int np_levels, int n_tot){
int ind1 = threadIdx.x + blockIdx.x * blockDim.x;
int dim1 = n_phon * np_levels;
int dim2 = dim1 * dim1;
if (ind1 < dim2){
int jj = ind1/dim1;
int ii = ind1 - jj * dim1;
rho_phon[ind1] = make_cuDoubleComplex(0.0e0, 0.0e0);
for (int kk=0; kk<n_el; kk++){
int ind2 = (ii + kk * dim1) + (jj + kk * dim1) * n_tot;
rho_phon[ind1] = cuCadd(rho_tot[ind2], rho_phon[ind1]);
}
}
return;
}
//##############################################################################
__global__ void move_x(double *xi_vec, double *vi_vec, double *xf_vec,
double dt, int n_bath){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
if (ind < n_bath){
xf_vec[ind] = xi_vec[ind] + vi_vec[ind] * dt;
}
return;
}
//##############################################################################
__global__ void get_partial_sum(double *xi_vec, double *sum_vec, int n_bath){
__shared__ double cache[Nthreads];
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
cache[cacheIndex] = 0.0e0;
if (ind < n_bath){
cache[cacheIndex] = xi_vec[ind];
__syncthreads();
int ii = blockDim.x/2;
while (ii != 0) {
if (cacheIndex < ii){
cache[cacheIndex] += cache[cacheIndex + ii];
}
__syncthreads();
ii /= 2;
}
if (cacheIndex == 0){
sum_vec[blockIdx.x] = cache[0];
}
}
return;
}
//##############################################################################
__global__ void get_partial_Ek(double *vi_vec, double *sum_vec, int n_bath){
__shared__ double cache[Nthreads];
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
cache[cacheIndex] = 0.0e0;
if (ind < n_bath){
cache[cacheIndex] = vi_vec[ind]*vi_vec[ind];
__syncthreads();
int ii = blockDim.x/2;
while (ii != 0) {
if (cacheIndex < ii){
cache[cacheIndex] += cache[cacheIndex + ii];
}
__syncthreads();
ii /= 2;
}
if (cacheIndex == 0){
sum_vec[blockIdx.x] = cache[0];
}
}
return;
}
//##############################################################################
__global__ void move_v(double *xi_vec, double *vi_vec, double *ki_vec,
double *vf_vec, double qforce, double dt,
int n_bath){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
if (ind < n_bath){
double ai = - ki_vec[ind] * xi_vec[ind] + qforce;
vf_vec[ind] = vi_vec[ind] + ai * dt;
}
}
//##############################################################################
__global__ void update_mat(cuDoubleComplex *matA, cuDoubleComplex *matB,
int dim){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
int dim2 = dim * dim;
if (ind < dim2){
matA[ind] = matB[ind];
}
return;
}
//##############################################################################
__global__ void update_vec(double *vecA, double *vecB, int dim){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
if (ind < dim){
vecA[ind] = vecB[ind];
}
return;
}
//##############################################################################
void init_cuda(complex<double> *H_tot, complex<double> *mu_tot,
double *v_bath_mat, double *fb_vec, double *xi_vec,
double *vi_vec, double *ki_vec,
complex<double> *rho_tot,
complex<double> *rho_phon, complex<double> *dVdX_mat,
UNINT n_el, UNINT n_phon, UNINT np_levels, UNINT n_tot,
UNINT n_bath){
double gaux = (double) (n_tot*n_tot);
double taux = (double) Nthreads;
Ncores1 = (UNINT) ceil(gaux/taux);
gaux = (double) (n_bath);
Ncores2 = (UNINT) ceil(gaux/taux);
int dimaux = n_phon * n_phon * np_levels * np_levels;
cudaMalloc((void**) &dev_Htot1 , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_Htot2 , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_Htot3 , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_mutot , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_rhotot , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_rhonew , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_rhoaux , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_Drho , n_tot*n_tot*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_rhophon, dimaux*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_dvdx , dimaux*sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_vbath , n_tot*n_tot*sizeof(double));
cudaMalloc((void**) &dev_fb , n_el*sizeof(double));
cudaMalloc((void**) &dev_xi , n_bath*sizeof(double));
cudaMalloc((void**) &dev_vi , n_bath*sizeof(double));
cudaMalloc((void**) &dev_ki , n_bath*sizeof(double));
cudaMalloc((void**) &dev_xf , n_bath*sizeof(double));
cudaMalloc((void**) &dev_vf , n_bath*sizeof(double));
cudaMalloc((void**) &dev_xh , n_bath*sizeof(double));
cudaMemcpy(dev_Htot1, H_tot, n_tot*n_tot*sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_mutot, mu_tot, n_tot*n_tot*sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_rhotot, rho_tot, n_tot*n_tot*sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_rhophon, rho_phon, dimaux*sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_dvdx, dVdX_mat, dimaux*sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_vbath, v_bath_mat, n_tot*n_tot*sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_fb, fb_vec, n_el*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_xi, xi_vec, n_bath*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_vi, vi_vec, n_bath*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_ki, ki_vec, n_bath*sizeof(double), cudaMemcpyHostToDevice);
return;
}
//##############################################################################
void free_cuda_memory(){
cudaFree(dev_Htot1);
cudaFree(dev_Htot2);
cudaFree(dev_Htot3);
cudaFree(dev_mutot);
cudaFree(dev_rhotot);
cudaFree(dev_rhonew);
cudaFree(dev_rhoaux);
cudaFree(dev_Drho);
cudaFree(dev_rhophon);
cudaFree(dev_dvdx);
cudaFree(dev_vbath);
cudaFree(dev_fb);
cudaFree(dev_ki);
cudaFree(dev_xi);
cudaFree(dev_xf);
cudaFree(dev_xh);
cudaFree(dev_vi);
cudaFree(dev_vf);
return;
}
//##############################################################################
void matmul_cublas(cuDoubleComplex *dev_A, cuDoubleComplex *dev_B,
cuDoubleComplex *dev_C, int dim){
const cuDoubleComplex alf = make_cuDoubleComplex(1.0,0.0);
const cuDoubleComplex bet = make_cuDoubleComplex(0.0, 0.0);
const cuDoubleComplex *alpha = &alf;
const cuDoubleComplex *beta = &bet;
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// Do the actual multiplication
cublasZgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, dev_A,
dim, dev_B, dim, beta, dev_C, dim);
// Destroy the handle
cublasDestroy(handle);
return;
}
//##############################################################################
void commute_cuda(cuDoubleComplex *dev_A, cuDoubleComplex *dev_B,
cuDoubleComplex *dev_C, int dim, const cuDoubleComplex alf){
const cuDoubleComplex bet1 = make_cuDoubleComplex(0.0, 0.0);
const cuDoubleComplex bet2 = make_cuDoubleComplex(-1.0, 0.0);
const cuDoubleComplex *alpha = &alf;
const cuDoubleComplex *beta1 = &bet1;
const cuDoubleComplex *beta2 = &bet2;
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// Computing B.A
cublasZgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, dev_B,
dim, dev_A, dim, beta1, dev_C, dim);
// Computing A.B - B.A
cublasZgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, dev_A,
dim, dev_B, dim, beta2, dev_C, dim);
// Destroy the handle
cublasDestroy(handle);
return;
}
//##############################################################################
void matadd_cublas(cuDoubleComplex *dev_A, cuDoubleComplex *dev_B,
cuDoubleComplex *dev_C, int dim, const cuDoubleComplex alf,
const cuDoubleComplex bet){
const cuDoubleComplex *alpha = &alf;
const cuDoubleComplex *beta = &bet;
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// Do the actual multiplication
cublasZgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, alpha, dev_A,
dim, beta ,dev_B, dim, dev_C, dim);
// Destroy the handle
cublasDestroy(handle);
return;
}
//##############################################################################
double get_trace_cuda(cuDoubleComplex *dev_A, UNINT dim){
// cuDoubleComplex aux1= make_cuDoubleComplex(0.0e0, 0.0e0);
complex<double> aux1;
double aux2;
complex<double> aux_vec[dim];
cuDoubleComplex *dev_vec;
cudaMalloc((void**) &dev_vec, dim * sizeof(cuDoubleComplex));
get_diag<<<Ncores1, Nthreads>>>(dev_A, dev_vec, dim);
cudaMemcpy(aux_vec, dev_vec, dim*sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost);
for(int ii=0;ii<dim;ii++){
aux1 += aux_vec[ii];
}
aux2 = aux1.real();
cudaFree(dev_vec);
return aux2;
}
//##############################################################################
void include_Hceed_cuda(cuDoubleComplex *dev_Hout, cuDoubleComplex *dev_Hin,
cuDoubleComplex *dev_mu, cuDoubleComplex *dev_rhoin,
double a_ceed, int n_tot){
int dim2 = n_tot*n_tot;
double dmu2;
cuDoubleComplex *dev_aux1, *dev_aux2, *dev_Hceed;
const cuDoubleComplex alf = make_cuDoubleComplex(1.0,0.0);
cudaMalloc((void**) &dev_aux1, dim2 * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_aux2, dim2 * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_Hceed, dim2 * sizeof(cuDoubleComplex));
commute_cuda(dev_mu, dev_Hin, dev_Hceed, n_tot, alf);
commute_cuda(dev_Hceed, dev_Hin, dev_aux1, n_tot, alf);
matmul_cublas(dev_rhoin, dev_aux1, dev_aux2, n_tot);
dmu2 = get_trace_cuda(dev_aux2, n_tot);
const cuDoubleComplex bet = make_cuDoubleComplex(0.0, a_ceed*dmu2);
matadd_cublas(dev_Hin, dev_Hceed, dev_Hout, n_tot, alf, bet);
cudaFree(dev_aux1);
cudaFree(dev_aux2);
cudaFree(dev_Hceed);
return;
}
//##############################################################################
double get_Qforces_cuda(cuDoubleComplex *dev_rhoin ,double *fb_vec,
UNINT n_el, UNINT n_phon, UNINT np_levels, UNINT n_tot){
UNINT dim1 = np_levels * n_phon;
UNINT dim2 = dim1 * dim1;
double qforce = 0.0e0;
complex<double> aux_vec[n_tot];
cuDoubleComplex *dev_vec, *dev_mat;
cudaMalloc((void**) &dev_vec, n_tot * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_mat, dim2 * sizeof(cuDoubleComplex));
get_diag<<<Ncores1, Nthreads>>>(dev_rhoin, dev_vec, n_tot);
cudaMemcpy(aux_vec, dev_vec, n_tot*sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost);
for (int kk=0; kk<n_el; kk++){
for (int ii=0; ii<dim1; ii++){
qforce += -aux_vec[ii+kk*dim1].real() * fb_vec[kk];
}
}
build_rhophon<<<Ncores1, Nthreads>>>(dev_rhoin, dev_rhophon, n_el , n_phon,
np_levels, n_tot);
matmul_cublas(dev_rhophon, dev_dvdx, dev_mat, dim1);
get_diag<<<Ncores1, Nthreads>>>(dev_mat, dev_vec, dim1);
cudaMemcpy(aux_vec, dev_vec, n_tot*sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost);
for (int ii=0; ii<dim1; ii++){
qforce += -aux_vec[ii].real();
}
cudaFree(dev_vec);
cudaFree(dev_mat);
return qforce;
}
//##############################################################################
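// Two-stage midpoint integrator: a dt/2 predictor for rho, x and v built from H(t),
// followed by a full-dt update of all three using the Hamiltonian and quantum force
// re-evaluated at t + dt/2.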
void runge_kutta_propagator_cuda(double mass_bath, double a_ceed, double dt,
double Efield, double Efieldaux,
double *fb_vec, int tt, UNINT n_el,
UNINT n_phon, UNINT np_levels,
UNINT n_tot, UNINT n_bath){
const cuDoubleComplex alf1 = make_cuDoubleComplex(0.0e0, -0.5*dt);
const cuDoubleComplex alf2 = make_cuDoubleComplex(0.0e0, -dt);
const cuDoubleComplex alf3 = make_cuDoubleComplex(1.0e0, 0.0e0);
double *dev_partialvec;
double partialvec[Ncores2];
double sum_xi;
double dth = 0.5e0 * dt;
double qforce;
//double time = dt * tt;
cudaMalloc((void**) &dev_partialvec, Ncores2*sizeof(double));
//Calculating the sum of all the coordintes of the bath----------------------
get_partial_sum<<<Ncores2, Nthreads>>>(dev_xi, dev_partialvec, n_bath);
cudaMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
cudaMemcpyDeviceToHost);
sum_xi = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
sum_xi += partialvec[ii];
}
//---------------------------------------------------------------------------
//Efield_t = Efield * exp(-pow(((time-10.0)/0.2),2.0));
//Building the new Hamiltonian at time = t ----------------------------------
update_H_tot<<<Ncores1, Nthreads>>>(dev_Htot2, dev_Htot1, dev_mutot,
dev_vbath, dev_fb, sum_xi, Efield,
n_el, n_phon, np_levels, n_tot);
//Including CEED Hamiltonian:
include_Hceed_cuda(dev_Htot3, dev_Htot2, dev_mutot, dev_rhotot, a_ceed,
n_tot);
//---------------------------------------------------------------------------
//Calculating rho(t+dt/2) using LvN------------------------------------------
commute_cuda(dev_Htot3, dev_rhotot, dev_Drho, n_tot, alf3);
matadd_cublas(dev_rhotot, dev_Drho, dev_rhoaux, n_tot, alf3, alf1);
//---------------------------------------------------------------------------
//Calculating x(t+dt/2) and v(t+dt/2) using the Quantum forces --------------
qforce = get_Qforces_cuda(dev_rhotot , fb_vec, n_el, n_phon, np_levels,
n_tot);
qforce = qforce/mass_bath;
move_x<<<Ncores2, Nthreads>>>(dev_xi, dev_vi, dev_xh, dth, n_bath);
move_v<<<Ncores2, Nthreads>>>(dev_xi, dev_vi, dev_ki, dev_vf, qforce, dth,
n_bath);
//---------------------------------------------------------------------------
//Hencefort we repeat everything to obtain everything in t + dt -------------
get_partial_sum<<<Ncores2, Nthreads>>>(dev_xh, dev_partialvec, n_bath);
cudaMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
cudaMemcpyDeviceToHost);
sum_xi = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
sum_xi += partialvec[ii];
}
//Efield_t = Efield * exp(-pow(((time+dth-10.0)/0.2),2.0));
update_H_tot<<<Ncores1, Nthreads>>>(dev_Htot2, dev_Htot1, dev_mutot,
dev_vbath, dev_fb, sum_xi, Efieldaux,
n_el, n_phon, np_levels, n_tot);
include_Hceed_cuda(dev_Htot3, dev_Htot2, dev_mutot, dev_rhoaux, a_ceed,
n_tot);
commute_cuda(dev_Htot3, dev_rhoaux, dev_Drho, n_tot, alf3);
matadd_cublas(dev_rhotot, dev_Drho, dev_rhonew, n_tot, alf3, alf2);
qforce = get_Qforces_cuda(dev_rhoaux , fb_vec, n_el, n_phon, np_levels,
n_tot);
qforce = qforce/mass_bath;
move_x<<<Ncores2, Nthreads>>>(dev_xi, dev_vf, dev_xf, dt, n_bath);
move_v<<<Ncores2, Nthreads>>>(dev_xh, dev_vi, dev_ki, dev_vf, qforce, dt,
n_bath);
//---------------------------------------------------------------------------
//We update rho, x and v:
update_mat<<<Ncores1, Nthreads>>>(dev_rhotot, dev_rhonew, n_tot);
update_vec<<<Ncores2, Nthreads>>>(dev_xi, dev_xf, n_bath);
update_vec<<<Ncores2, Nthreads>>>(dev_vi, dev_vf, n_bath);
cudaFree(dev_partialvec);
return;
}
//##############################################################################
void calcrhophon(cuDoubleComplex *dev_rhoin, int n_el, int n_phon,
int np_levels, int n_tot){
build_rhophon<<<Ncores1, Nthreads>>>(dev_rhoin, dev_rhophon, n_el , n_phon,
np_levels, n_tot);
return;
}
//##############################################################################
void getingmat(complex<double> *matA, cuDoubleComplex *dev_A, int n_tot){
cudaMemcpy(matA, dev_A, n_tot*n_tot*sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost);
return;
}
//##############################################################################
// xi_tot: summed bath output written below; assumed to be part of the interface
// declared in cuda_subs.h together with the other output pointers.
void getting_printing_info(double *Ener, double *mu, complex<double> *tr_rho,
                           double *Ek_bath, double *xi_tot,
                           complex<double> *rho_tot,
                           UNINT n_tot, UNINT n_bath){
int dim2 = n_tot * n_tot;
cuDoubleComplex *dev_aux1;
cuDoubleComplex *dev_vec;
double *dev_partialvec;
double partialvec[Ncores2];
cudaMalloc((void**) &dev_aux1, dim2 * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_vec, n_tot * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_partialvec, Ncores2*sizeof(double));
matmul_cublas(dev_rhotot, dev_Htot1, dev_aux1, n_tot);
*Ener = get_trace_cuda(dev_aux1, n_tot);
matmul_cublas(dev_rhotot, dev_mutot, dev_aux1, n_tot);
*mu = get_trace_cuda(dev_aux1, n_tot);
get_diag<<<Ncores1, Nthreads>>>(dev_rhotot, dev_vec, n_tot);
cudaMemcpy(tr_rho, dev_vec, n_tot*sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost);
get_partial_Ek<<<Ncores2, Nthreads>>>(dev_vi, dev_partialvec, n_bath);
cudaMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
cudaMemcpyDeviceToHost);
*Ek_bath = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
*Ek_bath += 0.5e0 * partialvec[ii];
}
get_partial_sum<<<Ncores2, Nthreads>>>(dev_vi, dev_partialvec, n_bath);
cudaMemcpy(partialvec, dev_partialvec, Ncores2*sizeof(double),
cudaMemcpyDeviceToHost);
*xi_tot = 0.0e0;
   for (int ii=0; ii<Ncores2; ii++){
*xi_tot += partialvec[ii];
}
cudaMemcpy(rho_tot, dev_rhotot, n_tot*n_tot*sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost);
cudaFree(dev_vec);
cudaFree(dev_aux1);
cudaFree(dev_partialvec);
return;
}
//##############################################################################
|
a83118614f375037f1b0590f1e1be5a8c12ae968.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>
#include "../../../CudaHelper.h"
const unsigned int DATE_SIZE = 1 << 24; // 16M
const unsigned int BLOCK_SIZE = 1024; // block size
const unsigned int GRID_SIZE = 8; // grid size
/*
*
 * Many blocks: every thread (there are BLOCK_SIZE*GRID_SIZE threads in total) executes
 * DATE_SIZE/(BLOCK_SIZE*GRID_SIZE) computation tasks
 * (if BLOCK_SIZE*GRID_SIZE == DATE_SIZE, every thread executes exactly once)
*
* friendly for global memory access(data space locality and benefit for cache line), adjacent thread access adjacent data addr space
* thread k compute column k data:(k = 0 ~ BLOCK_SIZE*GRID_SIZE-1)
*
* ThreadId: tid0 tid1 ... tidBLOCK_SIZE*GRID_SIZE-1
* -------------------------------------------------------------------------------------------------------
* DataId : dat0 dat1 ... datBLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE+0 datBLOCK_SIZE*GRID_SIZE+1 ... datBLOCK_SIZE*GRID_SIZE+BLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE*2+0 datBLOCK_SIZE*GRID_SIZE*2+1 ... datBLOCK_SIZE*GRID_SIZE*2+BLOCK_SIZE*GRID_SIZE-1
*
* ...
*
* badly for global memory access(data space locality and benefit for cache line), adjacent thread does not access adjacent data addr space
* thread k compute row k data:(k = 0 ~ BLOCK_SIZE*GRID_SIZE-1)
* ThreadId:
* ---------------------------------------------------------------------------------------------------------------------------------
* DataId : dat0 dat1 ... datBLOCK_SIZE*GRID_SIZE-1 tid0
* DataId : datBLOCK_SIZE*GRID_SIZE+0 datBLOCK_SIZE*GRID_SIZE+1 ... datBLOCK_SIZE*GRID_SIZE+BLOCK_SIZE*GRID_SIZE-1 tid1
* DataId : datBLOCK_SIZE*GRID_SIZE*2+0 datBLOCK_SIZE*GRID_SIZE*2+ ... datBLOCK_SIZE*GRID_SIZE*2+BLOCK_SIZE*GRID_SIZE-1 tid2
*
* ...
*/
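/*
 * The "friendly" pattern above is the standard grid-stride loop: with the launch
 * configuration used in main() (GRID_SIZE blocks of BLOCK_SIZE threads each),
 * BLOCK_SIZE * GRID_SIZE == blockDim.x * gridDim.x, so the loop in the kernel is
 * equivalent to
 *
 *   for (int i = blockIdx.x * blockDim.x + threadIdx.x;
 *        i < DATE_SIZE;
 *        i += blockDim.x * gridDim.x) { result += ...; }
 *
 * which keeps consecutive threads on consecutive addresses in every iteration.
 */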
// Kernel function to compute square sum of an int array to a result
__global__ void SquareSum(int *pInputData, int *pResult)
{
const int tid = threadIdx.x +blockDim.x * blockIdx.x;
int i = 0;
int result = 0;
// friendly for global memory access(data space locality and benefit for cache line), adjacent thread access adjacent data addr space
for(i = tid; i < DATE_SIZE; i = i + BLOCK_SIZE * GRID_SIZE)
{
result += pInputData[i] * pInputData[i];
}
// badly for global memory access(data space locality and benefit for cache line), adjacent thread does not access adjacent data addr space
/*
* const int count = DATE_SIZE /(BLOCK_SIZE * GRID_SIZE);
* for( i = tid * count; i < (tid+1) * count; i++)
* {
* result += pInputData[i] * pInputData[i];
* }
*/
pResult[tid] = result;
}
int main(int argv, char* argc[])
{
// Get cuda device count
int iCount;
hipGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
hipSetDevice(i);
// Malloc host data
int *pHostData = (int*)malloc(sizeof(int)*DATE_SIZE);
int *pHostThreadData = (int*)malloc(sizeof(int)*BLOCK_SIZE * GRID_SIZE);
int hostResult = 0;
if( 0 == pHostData)
{
printf("malloc host data failed!!!\n");
return -1;
}
// Generate 16M rand data range from 0 to 4
for(int i = 0; i < DATE_SIZE; i++)
{
pHostData[i] = rand() % 5;
}
// Malloc device data
int *pDeviceData = NULL;
int *pDeviceResult = NULL;
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceData, sizeof(int) * DATE_SIZE));
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceResult, sizeof(int) * BLOCK_SIZE * GRID_SIZE));
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyHostToDevice);
HANDLE_CUDA_ERROR(hipMemcpy(pDeviceData, pHostData, sizeof(int) * DATE_SIZE, hipMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(SqureSumKernel);
hipLaunchKernelGGL(( SquareSum), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, pDeviceData, pDeviceResult);
hipError_t err = hipGetLastError();
if(err != hipSuccess)
{
printf("%s\n", hipGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(SqureSumKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(hipMemcpy(pHostThreadData, pDeviceResult, sizeof(int) * BLOCK_SIZE * GRID_SIZE, hipMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(hipFree(pDeviceData));
HANDLE_CUDA_ERROR(hipFree(pDeviceResult));
// Add every thread result in CPU
TIME_TRACE_CPU_START(AddEveryThreadData);
for (int i = 0 ; i < BLOCK_SIZE * GRID_SIZE; i++)
{
hostResult += pHostThreadData[i];
}
TIME_TRACE_CPU_STOP(AddEveryThreadData);
// Print result
printf("Square Sum Computed Via Result GPU & CPU is %d.\n", hostResult);
// hipDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(hipDeviceReset());
printf("\nGPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute in CPU for comparision
hostResult = 0;
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
for (int i = 0 ; i < DATE_SIZE; i++)
{
hostResult += pHostData[i] * pHostData[i];
}
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
// Free host memory
free(pHostThreadData); pHostThreadData = NULL;
free(pHostData); pHostData = NULL;
// Print result
printf("Square Sum Computed Result Via CPU is %d.\n", hostResult);
printf("\nCPU COMPUTE END********************\n");
return 0;
}
| a83118614f375037f1b0590f1e1be5a8c12ae968.cu | #include "stdio.h"
#include <cuda_runtime.h>
#include "../../../CudaHelper.h"
const unsigned int DATE_SIZE = 1 << 24; // 16M
const unsigned int BLOCK_SIZE = 1024; // block size
const unsigned int GRID_SIZE = 8; // grid size
/*
*
 * Many blocks: every thread (there are BLOCK_SIZE*GRID_SIZE threads in total) executes
 * DATE_SIZE/(BLOCK_SIZE*GRID_SIZE) computation tasks
 * (if BLOCK_SIZE*GRID_SIZE == DATE_SIZE, every thread executes exactly once)
*
* friendly for global memory access(data space locality and benefit for cache line), adjacent thread access adjacent data addr space
* thread k compute column k data:(k = 0 ~ BLOCK_SIZE*GRID_SIZE-1)
*
* ThreadId: tid0 tid1 ... tidBLOCK_SIZE*GRID_SIZE-1
* -------------------------------------------------------------------------------------------------------
* DataId : dat0 dat1 ... datBLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE+0 datBLOCK_SIZE*GRID_SIZE+1 ... datBLOCK_SIZE*GRID_SIZE+BLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE*2+0 datBLOCK_SIZE*GRID_SIZE*2+1 ... datBLOCK_SIZE*GRID_SIZE*2+BLOCK_SIZE*GRID_SIZE-1
*
* ...
*
* badly for global memory access(data space locality and benefit for cache line), adjacent thread does not access adjacent data addr space
* thread k compute row k data:(k = 0 ~ BLOCK_SIZE*GRID_SIZE-1)
* ThreadId:
* ---------------------------------------------------------------------------------------------------------------------------------
* DataId : dat0 dat1 ... datBLOCK_SIZE*GRID_SIZE-1 tid0
* DataId : datBLOCK_SIZE*GRID_SIZE+0 datBLOCK_SIZE*GRID_SIZE+1 ... datBLOCK_SIZE*GRID_SIZE+BLOCK_SIZE*GRID_SIZE-1 tid1
* DataId : datBLOCK_SIZE*GRID_SIZE*2+0 datBLOCK_SIZE*GRID_SIZE*2+ ... datBLOCK_SIZE*GRID_SIZE*2+BLOCK_SIZE*GRID_SIZE-1 tid2
*
* ...
*/
// Kernel function to compute square sum of an int array to a result
__global__ void SquareSum(int *pInputData, int *pResult)
{
const int tid = threadIdx.x +blockDim.x * blockIdx.x;
int i = 0;
int result = 0;
// friendly for global memory access(data space locality and benefit for cache line), adjacent thread access adjacent data addr space
for(i = tid; i < DATE_SIZE; i = i + BLOCK_SIZE * GRID_SIZE)
{
result += pInputData[i] * pInputData[i];
}
// badly for global memory access(data space locality and benefit for cache line), adjacent thread does not access adjacent data addr space
/*
* const int count = DATE_SIZE /(BLOCK_SIZE * GRID_SIZE);
* for( i = tid * count; i < (tid+1) * count; i++)
* {
* result += pInputData[i] * pInputData[i];
* }
*/
pResult[tid] = result;
}
int main(int argv, char* argc[])
{
// Get cuda device count
int iCount;
cudaGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
cudaSetDevice(i);
// Malloc host data
int *pHostData = (int*)malloc(sizeof(int)*DATE_SIZE);
int *pHostThreadData = (int*)malloc(sizeof(int)*BLOCK_SIZE * GRID_SIZE);
int hostResult = 0;
if( 0 == pHostData)
{
printf("malloc host data failed!!!\n");
return -1;
}
// Generate 16M rand data range from 0 to 4
for(int i = 0; i < DATE_SIZE; i++)
{
pHostData[i] = rand() % 5;
}
// Malloc device data
int *pDeviceData = NULL;
int *pDeviceResult = NULL;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceData, sizeof(int) * DATE_SIZE));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceResult, sizeof(int) * BLOCK_SIZE * GRID_SIZE));
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyHostToDevice);
HANDLE_CUDA_ERROR(cudaMemcpy(pDeviceData, pHostData, sizeof(int) * DATE_SIZE, cudaMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(SqureSumKernel);
SquareSum<<<GRID_SIZE, BLOCK_SIZE>>>(pDeviceData, pDeviceResult);
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(SqureSumKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(cudaMemcpy(pHostThreadData, pDeviceResult, sizeof(int) * BLOCK_SIZE * GRID_SIZE, cudaMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(cudaFree(pDeviceData));
HANDLE_CUDA_ERROR(cudaFree(pDeviceResult));
// Add every thread result in CPU
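    // (BLOCK_SIZE * GRID_SIZE = 8192 partial sums is small, so a serial host-side
    // addition is cheaper than launching a second reduction kernel or using atomics)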
TIME_TRACE_CPU_START(AddEveryThreadData);
for (int i = 0 ; i < BLOCK_SIZE * GRID_SIZE; i++)
{
hostResult += pHostThreadData[i];
}
TIME_TRACE_CPU_STOP(AddEveryThreadData);
// Print result
    printf("Square Sum Computed Result Via GPU & CPU is %d.\n", hostResult);
// cudaDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(cudaDeviceReset());
printf("\nGPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute in CPU for comparision
hostResult = 0;
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
for (int i = 0 ; i < DATE_SIZE; i++)
{
hostResult += pHostData[i] * pHostData[i];
}
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
// Free host memory
free(pHostThreadData); pHostThreadData = NULL;
free(pHostData); pHostData = NULL;
// Print result
printf("Square Sum Computed Result Via CPU is %d.\n", hostResult);
printf("\nCPU COMPUTE END********************\n");
return 0;
}
|
2e2b55fde3cd1de596e4ba51e95ea62796560fbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <random>
#include <limits>
#include <vector>
#include <chrono>
constexpr auto VECTOR_LENGTH = 1024u * 1024u * 16u;
constexpr auto EPS = 1e-6f;
#define GPU_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
std::cout << "GPUassert: " << hipGetErrorString(code) << " " << file << ":"
<< line << "\n";
if (abort) {
std::exit(code);
}
}
}
float findMaxHost(const std::vector<float> &A) {
auto time1 = std::chrono::steady_clock::now();
auto it = std::max_element(std::begin(A), std::end(A));
auto time2 = std::chrono::steady_clock::now();
std::cout << "CPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(time2 -
time1)
.count()
<< "\n";
return *it;
}
constexpr auto ELEMENTS_PER_WORKITEM = 8u;
constexpr auto WORKGROUP_SIZE = 128u;
constexpr auto ELEMENTS_PER_BLOCK = WORKGROUP_SIZE * ELEMENTS_PER_WORKITEM;
constexpr auto MIN_FLOAT = std::numeric_limits<float>::lowest(); // lowest(), not min(): min() is the smallest positive float, not the most negative value
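// Warp-synchronous tail of the reduction: within one warp the 32 lanes execute in
// lockstep on pre-Volta GPUs, so the last steps need no __syncthreads(); `volatile`
// forces each access to go through shared memory instead of a cached register copy.
// (On Volta+ with independent thread scheduling, a __syncwarp()-based loop is safer.)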
__device__ void warpReduce(volatile float *shared, int tid) {
shared[tid] = fmaxf(shared[tid], shared[tid + 32]);
shared[tid] = fmaxf(shared[tid], shared[tid + 16]);
shared[tid] = fmaxf(shared[tid], shared[tid + 8]);
shared[tid] = fmaxf(shared[tid], shared[tid + 4]);
shared[tid] = fmaxf(shared[tid], shared[tid + 2]);
shared[tid] = fmaxf(shared[tid], shared[tid + 1]);
}
__global__ void maxKernel(float *A, float *result, int N) {
extern __shared__ float shared[];
int i = blockIdx.x * blockDim.x * ELEMENTS_PER_WORKITEM + threadIdx.x;
float max = MIN_FLOAT;
for (int j = 0; j < ELEMENTS_PER_WORKITEM; ++j) {
    // read before advancing so the first element of each thread's stride range is included
    if (i < N) {
      max = fmaxf(max, A[i]);
    }
    i += blockDim.x;
  }
shared[threadIdx.x] = max;
__syncthreads();
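  // Tree reduction in shared memory: each pass halves the number of active threads,
  // keeping the pairwise maximum, until 64 candidates remain for the warp-level tail.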
for (int max_thread_id = blockDim.x / 2; max_thread_id > 32;
max_thread_id /= 2) {
if (threadIdx.x < max_thread_id) {
shared[threadIdx.x] =
fmaxf(shared[threadIdx.x], shared[threadIdx.x + max_thread_id]);
}
__syncthreads();
}
if (threadIdx.x < 32) {
warpReduce(shared, threadIdx.x);
}
if (threadIdx.x == 0) {
result[blockIdx.x] = shared[0];
}
}
float findMaxGPU(const std::vector<float> &A) {
float *A_gpu, *temp_gpu;
auto byte_size = VECTOR_LENGTH * sizeof(float);
GPU_CHECK(hipMalloc(&A_gpu, byte_size));
GPU_CHECK(hipMemcpy(A_gpu, A.data(), byte_size, hipMemcpyHostToDevice));
auto block_count = VECTOR_LENGTH / ELEMENTS_PER_BLOCK;
GPU_CHECK(hipMalloc(&temp_gpu, block_count * sizeof(float)));
GPU_CHECK(hipDeviceSynchronize());
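  // Two-stage reduction: the kernel leaves one partial maximum per block in temp_gpu;
  // the final maximum over the block_count partial results is taken on the host below.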
auto time1 = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( maxKernel), dim3(block_count), dim3(WORKGROUP_SIZE), WORKGROUP_SIZE * sizeof(float), 0,
A_gpu, temp_gpu, VECTOR_LENGTH);
GPU_CHECK(hipDeviceSynchronize());
auto time2 = std::chrono::steady_clock::now();
std::vector<float> temp_host(block_count);
GPU_CHECK(hipMemcpy(temp_host.data(), temp_gpu, block_count * sizeof(float),
hipMemcpyDeviceToHost));
auto it = std::max_element(std::begin(temp_host), std::end(temp_host));
GPU_CHECK(hipFree(A_gpu));
GPU_CHECK(hipFree(temp_gpu));
std::cout << "GPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(time2 -
time1)
.count()
<< "\n";
return *it;
}
int main() {
std::random_device rd{};
std::mt19937 gen{rd()};
std::normal_distribution<float> dist{5, 2};
std::vector<float> A(VECTOR_LENGTH);
for (auto i = 0u; i < VECTOR_LENGTH; ++i) {
A[i] = dist(gen);
}
auto max_host = findMaxHost(A);
auto max_device = findMaxGPU(A);
if (std::abs(max_host - max_device) > EPS) {
std::cout << "ERROR\n";
std::cout << max_host << " : " << max_device << "\n";
return 1;
} else {
std::cout << "SUCCESS\n";
}
return 0;
}
| 2e2b55fde3cd1de596e4ba51e95ea62796560fbb.cu | #include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <random>
#include <limits>
#include <vector>
#include <chrono>
constexpr auto VECTOR_LENGTH = 1024u * 1024u * 16u;
constexpr auto EPS = 1e-6f;
#define GPU_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
std::cout << "GPUassert: " << cudaGetErrorString(code) << " " << file << ":"
<< line << "\n";
if (abort) {
std::exit(code);
}
}
}
float findMaxHost(const std::vector<float> &A) {
auto time1 = std::chrono::steady_clock::now();
auto it = std::max_element(std::begin(A), std::end(A));
auto time2 = std::chrono::steady_clock::now();
std::cout << "CPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(time2 -
time1)
.count()
<< "\n";
return *it;
}
constexpr auto ELEMENTS_PER_WORKITEM = 8u;
constexpr auto WORKGROUP_SIZE = 128u;
constexpr auto ELEMENTS_PER_BLOCK = WORKGROUP_SIZE * ELEMENTS_PER_WORKITEM;
constexpr auto MIN_FLOAT = std::numeric_limits<float>::lowest(); // lowest(), not min(): min() is the smallest positive float, not the most negative value
__device__ void warpReduce(volatile float *shared, int tid) {
shared[tid] = fmaxf(shared[tid], shared[tid + 32]);
shared[tid] = fmaxf(shared[tid], shared[tid + 16]);
shared[tid] = fmaxf(shared[tid], shared[tid + 8]);
shared[tid] = fmaxf(shared[tid], shared[tid + 4]);
shared[tid] = fmaxf(shared[tid], shared[tid + 2]);
shared[tid] = fmaxf(shared[tid], shared[tid + 1]);
}
__global__ void maxKernel(float *A, float *result, int N) {
extern __shared__ float shared[];
int i = blockIdx.x * blockDim.x * ELEMENTS_PER_WORKITEM + threadIdx.x;
float max = MIN_FLOAT;
for (int j = 0; j < ELEMENTS_PER_WORKITEM; ++j) {
    // read before advancing so the first element of each thread's stride range is included
    if (i < N) {
      max = fmaxf(max, A[i]);
    }
    i += blockDim.x;
  }
shared[threadIdx.x] = max;
__syncthreads();
for (int max_thread_id = blockDim.x / 2; max_thread_id > 32;
max_thread_id /= 2) {
if (threadIdx.x < max_thread_id) {
shared[threadIdx.x] =
fmaxf(shared[threadIdx.x], shared[threadIdx.x + max_thread_id]);
}
__syncthreads();
}
if (threadIdx.x < 32) {
warpReduce(shared, threadIdx.x);
}
if (threadIdx.x == 0) {
result[blockIdx.x] = shared[0];
}
}
float findMaxGPU(const std::vector<float> &A) {
float *A_gpu, *temp_gpu;
auto byte_size = VECTOR_LENGTH * sizeof(float);
GPU_CHECK(cudaMalloc(&A_gpu, byte_size));
GPU_CHECK(cudaMemcpy(A_gpu, A.data(), byte_size, cudaMemcpyHostToDevice));
auto block_count = VECTOR_LENGTH / ELEMENTS_PER_BLOCK;
GPU_CHECK(cudaMalloc(&temp_gpu, block_count * sizeof(float)));
GPU_CHECK(cudaDeviceSynchronize());
auto time1 = std::chrono::steady_clock::now();
maxKernel<<<block_count, WORKGROUP_SIZE, WORKGROUP_SIZE * sizeof(float)>>>(
A_gpu, temp_gpu, VECTOR_LENGTH);
GPU_CHECK(cudaDeviceSynchronize());
auto time2 = std::chrono::steady_clock::now();
std::vector<float> temp_host(block_count);
GPU_CHECK(cudaMemcpy(temp_host.data(), temp_gpu, block_count * sizeof(float),
cudaMemcpyDeviceToHost));
auto it = std::max_element(std::begin(temp_host), std::end(temp_host));
GPU_CHECK(cudaFree(A_gpu));
GPU_CHECK(cudaFree(temp_gpu));
std::cout << "GPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(time2 -
time1)
.count()
<< "\n";
return *it;
}
int main() {
std::random_device rd{};
std::mt19937 gen{rd()};
std::normal_distribution<float> dist{5, 2};
std::vector<float> A(VECTOR_LENGTH);
for (auto i = 0u; i < VECTOR_LENGTH; ++i) {
A[i] = dist(gen);
}
auto max_host = findMaxHost(A);
auto max_device = findMaxGPU(A);
if (std::abs(max_host - max_device) > EPS) {
std::cout << "ERROR\n";
std::cout << max_host << " : " << max_device << "\n";
return 1;
} else {
std::cout << "SUCCESS\n";
}
return 0;
}
|
3c77807621afe650e22741f6f2452927db98c6ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "nOCTcudaDLLHeader.cuh"
// status and control parameters
static int gnMode = -1;
static int gnAllocationStatus = 0;
static bool gbIsReferenceRecorded = false;
static bool gbIsReferenceLoaded = false;
static bool gbIsCalibrationLoaded = false;
static bool gbIsDispersionLoaded = false;
static int gnRawLineLength;
static int gnRawNumberLines; // number of lines in a frame
// static int gnCalibrationNumberLines;
static int gnProcessNumberLines; // number of lines in a chunk
static int gnProcessedNumberLines;
static int gnPerpendicular;
static int gnMidLength;
/* raw spectra arrays */
// common
static short* d_gpnRawIMAQ; // device: raw spectra from camera
static float* d_gpfRawIMAQ; // device: raw spectra (gpfRawCalibration)
static float* gpfIMAQPitched; // device: raw spectra copied to pitched memory (gpfProcessCalibration)
static size_t gnIMAQPitch; // gnProcessCalibrationPitch
// PS-SD-OCT
static short* d_gpnRawIMAQParallel; // device: raw spectra from camera
static float* d_gpfRawIMAQParallel; // device: raw spectra (gpfRawCalibration)
static short* d_gpnRawIMAQPerpendicular; // device: raw spectra from camera
static float* d_gpfRawIMAQPerpendicular; // device: raw spectra (gpfRawCalibration)
/* reference */
// common
static float* gpfReference;
// PS-SD-OCT
static float* gpfReferenceParallel;
static float* gpfReferencePerpendicular;
/* fft */
static hipfftHandle gchForward;
static hipfftComplex* gpcProcessDepthProfile;
static size_t gnProcessDepthProfilePitch;
// calibration mask
static int gnCalibrationStart;
static int gnCalibrationStop;
static int gnCalibrationRound;
static float* gpfCalibrationMask;
// reverse fft
static hipfftComplex* gpcProcessSpectrum;
static size_t gnProcessSpectrumPitch;
static hipfftHandle gchReverse;
// phase
static float* gpfProcessPhase;
static size_t gnProcessPhasePitch;
// unwrap
static float gfPiEps = (float)(acos(-1.0) - 1.0e-30);
static float gf2Pi = (float)(2.0 * acos(-1.0));
/* linear fit and interpolation */
// common
static float* gpfLeftPhase;
static float* gpfRightPhase;
static float* gpfKLineCoefficients;
static float* gpfProcessK;
static size_t gnKPitch;
static int* gpnProcessIndex;
static size_t gnIndexPitch;
static int* gpnProcessAssigned;
static size_t gnAssignedPitch;
static int gnKMode;
static float* gpfProcessSpectrumK;
static size_t gnSpectrumKPitch;
// PS-SD-OCT
static float* gpfProcessKParallel;
static float* gpfProcessKPerpendicular;
static int* gpnProcessIndexParallel;
static int* gpnProcessIndexPerpendicular;
static size_t gnKPitchParallel;
static size_t gnKPitchPerpendicular;
static size_t gnIndexPitchParallel;
static size_t gnIndexPitchPerpendicular;
// dispersion mask
static int gnDispersionStart;
static int gnDispersionStop;
static int gnDispersionRound;
static float* gpfDispersionMask;
// dispersion correction
static hipfftComplex* gpcDispersionCorrection;
static hipfftHandle gchForwardComplex;
static hipfftComplex* gpcProcessKCorrected;
static size_t gnKCorrectedPitch;
// static float* gpfProcessOCT;
static size_t gnProcessOCTPitch;
static hipfftComplex* gpcProcessedOCT;
// PS-SD-OCT
static hipfftComplex* gpcProcessedOCTParallelOdd;
static hipfftComplex* gpcProcessedOCTParallelEven;
static hipfftComplex* gpcProcessedOCTPerpendicularOdd;
static hipfftComplex* gpcProcessedOCTPerpendicularEven;
int getDeviceCount(int* nNumberDevices) {
// check for GPU
int nDevices = -1;
int nRet = hipGetDeviceCount(&nDevices);
if (nRet == hipSuccess)
*(nNumberDevices) = nDevices;
return nRet;
}
int getDeviceName(int nDeviceNumber, char* strDeviceName) {
// check for GPU
hipDeviceProp_t currentDevice;
int nRet = hipGetDeviceProperties(¤tDevice, nDeviceNumber);
if (nRet == hipSuccess) {
sprintf(strDeviceName, "%s (%d SMs, %d b/s, %d t/b, %d t/s, %d shared kB, %d GB)",
currentDevice.name,
currentDevice.multiProcessorCount,
currentDevice.maxBlocksPerMultiProcessor,
currentDevice.maxThreadsPerBlock,
currentDevice.maxThreadsPerMultiProcessor,
currentDevice.sharedMemPerBlock / 1024,
currentDevice.totalGlobalMem / 1024 / 1024 / 1024);
} // if (nRet
return nRet;
}
int checkStatus() {
return -1;
}
int initialize(int nMode, int nRawLineLength, int nRawNumberLines, int nProcessNumberLines, int nProcessedNumberLines) {
cleanup(nMode);
// copy parameters to global parameters
gnMode = nMode;
gnRawLineLength = nRawLineLength;
gnRawNumberLines = nRawNumberLines; // number of lines in a frame
gnProcessNumberLines = nProcessNumberLines; // number of lines in a chunk
gnProcessedNumberLines = nProcessedNumberLines;
    gnMidLength = (int)(gnRawLineLength / 2 + 1); // set before the switch: the gnMidLength-sized allocations below depend on it
    int nActualProcessNumberLines;
// allocate memory
switch (nMode) {
case 0: // SD-OCT
gnPerpendicular = 0;
// gnCalibrationNumberLines = 1;
nActualProcessNumberLines = gnProcessNumberLines;
// gpuErrchk(hipHostMalloc((void**)&h_gpnRawIMAQ, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(hipMalloc((void**)&d_gpnRawIMAQ, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(hipMalloc((void**)&d_gpfRawIMAQ, (gnRawLineLength * gnRawNumberLines) * sizeof(float)));
gpuErrchk(hipMalloc((void**)&gpfReference, gnRawLineLength * sizeof(float)));
break;
case 1: // PS SD-OCT
gnPerpendicular = 1;
// gnCalibrationNumberLines = gnRawNumberLines; // QUESTION: what is this parameter?
nActualProcessNumberLines = gnProcessNumberLines >> 1; // only process every other line in each array
gpuErrchk(hipMalloc((void**)&d_gpnRawIMAQParallel, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(hipMalloc((void**)&d_gpfRawIMAQParallel, (gnRawLineLength * gnRawNumberLines) * sizeof(float)));
gpuErrchk(hipMalloc((void**)&d_gpnRawIMAQPerpendicular, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(hipMalloc((void**)&d_gpfRawIMAQPerpendicular, (gnRawLineLength * gnRawNumberLines) * sizeof(float)));
gpuErrchk(hipMalloc((void**)&gpfReferenceParallel, gnRawLineLength * sizeof(float)));
gpuErrchk(hipMalloc((void**)&gpfReferencePerpendicular, gnRawLineLength * sizeof(float)));
gpuErrchk(hipMallocPitch((void**)&gpfProcessKParallel, &gnKPitchParallel, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(hipMallocPitch((void**)&gpfProcessKPerpendicular, &gnKPitchPerpendicular, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(hipMallocPitch((void**)&gpnProcessIndexParallel, &gnIndexPitchParallel, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(hipMallocPitch((void**)&gpnProcessIndexPerpendicular, &gnIndexPitchPerpendicular, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(hipMalloc((void**)&gpcProcessedOCTParallelOdd, (gnMidLength * gnProcessedNumberLines) * sizeof(hipfftComplex)));
gpuErrchk(hipMalloc((void**)&gpcProcessedOCTParallelEven, (gnMidLength * gnProcessedNumberLines) * sizeof(hipfftComplex)));
gpuErrchk(hipMalloc((void**)&gpcProcessedOCTPerpendicularOdd, (gnMidLength * gnProcessedNumberLines) * sizeof(hipfftComplex)));
gpuErrchk(hipMalloc((void**)&gpcProcessedOCTPerpendicularEven, (gnMidLength * gnProcessedNumberLines) * sizeof(hipfftComplex)));
break;
case 2: // line field
gnPerpendicular = 0;
// gnCalibrationNumberLines = 1;
break;
case 3: // OFDI
gnPerpendicular = 0;
// gnCalibrationNumberLines = gnRawNumberLines;
break;
case 4: // PS OFDI
gnPerpendicular = 1;
// gnCalibrationNumberLines = gnRawNumberLines;
break;
} // switch (nMode)
gpuErrchk(hipMallocPitch((void**)&gpfIMAQPitched, &gnIMAQPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gnMidLength = (int)(gnRawLineLength / 2 + 1);
gpuErrchk(hipMallocPitch((void**)&gpcProcessDepthProfile, &gnProcessDepthProfilePitch, gnRawLineLength * sizeof(hipfftComplex), nActualProcessNumberLines));
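    // Batched 1-D real-to-complex FFT plan: one transform of length gnRawLineLength per line.
    // nIDist/nODist are batch-to-batch distances in elements (row pitch in bytes divided by the
    // element size), so consecutive batches map onto consecutive rows of the pitched buffers.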
int nRank = 1;
int pn[] = { gnRawLineLength };
int nIStride = 1, nOStride = 1;
int nIDist = gnIMAQPitch / sizeof(float);
int nODist = gnProcessDepthProfilePitch / sizeof(hipfftComplex);
int pnINEmbed[] = { 0 };
int pnONEmbed[] = { 0 };
int nBatch = gnProcessNumberLines >> 1;
hipfftPlanMany(&gchForward, nRank, pn, pnINEmbed, nIStride, nIDist, pnONEmbed, nOStride, nODist, HIPFFT_R2C, nBatch);
gpuErrchk(hipMalloc((void**)&gpfCalibrationMask, gnRawLineLength * sizeof(float)));
gpuErrchk(hipMallocPitch((void**)&gpcProcessSpectrum, &gnProcessSpectrumPitch, gnRawLineLength * sizeof(hipfftComplex), nActualProcessNumberLines));
nIDist = gnProcessDepthProfilePitch / sizeof(hipfftComplex);
nODist = gnProcessSpectrumPitch / sizeof(hipfftComplex);
hipfftPlanMany(&gchReverse, nRank, pn, pnINEmbed, nIStride, nIDist, pnONEmbed, nOStride, nODist, HIPFFT_C2C, nBatch);
gpuErrchk(hipMallocPitch((void**)&gpfProcessPhase, &gnProcessPhasePitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
hipMalloc((void**)&gpfLeftPhase, sizeof(float));
hipMalloc((void**)&gpfRightPhase, sizeof(float));
hipMalloc((void**)&gpfKLineCoefficients, 2 * sizeof(float));
gpuErrchk(hipMallocPitch((void**)&gpfProcessK, &gnKPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(hipMallocPitch((void**)&gpnProcessIndex, &gnIndexPitch, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(hipMallocPitch((void**)&gpnProcessAssigned, &gnAssignedPitch, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(hipMallocPitch((void**)&gpfProcessSpectrumK, &gnSpectrumKPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
// gpuErrchk(hipMallocPitch((void**)&gpfProcessOCT, &gnProcessOCTPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(hipHostMalloc((void**)&gpcProcessedOCT, (gnMidLength * gnProcessedNumberLines) * sizeof(hipfftComplex)));
gpuErrchk(hipMalloc((void**)&gpfDispersionMask, gnRawLineLength * sizeof(float)));
gpuErrchk(hipMalloc((void**)&gpcDispersionCorrection, gnRawLineLength * sizeof(hipfftComplex)));
gpuErrchk(hipMallocPitch((void**)&gpcProcessKCorrected, &gnKCorrectedPitch, gnRawLineLength * sizeof(hipfftComplex), nActualProcessNumberLines));
nIDist = gnKCorrectedPitch / sizeof(hipfftComplex);
hipfftPlanMany(&gchForwardComplex, nRank, pn, pnINEmbed, nIStride, nIDist, pnONEmbed, nOStride, nODist, HIPFFT_C2C, nBatch);
gpuErrchk(hipDeviceSynchronize()); // QUESTION: will hipDeviceSynchronize slow down the performance?
gnAllocationStatus = 1;
return -1;
} // int initialize
int cleanup(int nMode) {
// free memory allocations
if (gnAllocationStatus == 1) {
switch (nMode)
{
case 0: // SD-OCT
// gpuErrchk(hipHostFree(h_gpnRawIMAQ));
gpuErrchk(hipFree(d_gpnRawIMAQ));
gpuErrchk(hipFree(d_gpfRawIMAQ));
gpuErrchk(hipFree(gpfReference));
break;
case 1: // PS SD-OCT
gpuErrchk(hipFree(d_gpnRawIMAQParallel));
gpuErrchk(hipFree(d_gpfRawIMAQParallel));
gpuErrchk(hipFree(d_gpnRawIMAQPerpendicular));
gpuErrchk(hipFree(d_gpfRawIMAQPerpendicular));
gpuErrchk(hipFree(gpfReferenceParallel));
gpuErrchk(hipFree(gpfReferencePerpendicular));
gpuErrchk(hipFree(gpfProcessKParallel));
gpuErrchk(hipFree(gpfProcessKPerpendicular));
gpuErrchk(hipFree(gpnProcessIndexParallel));
gpuErrchk(hipFree(gpnProcessIndexPerpendicular));
break;
case 2: // line field
break;
case 3: // OFDI
break;
case 4: // PS OFDI
            break;
}
gpuErrchk(hipFree(gpfIMAQPitched));
gpuErrchk(hipFree(gpcProcessDepthProfile));
hipfftDestroy(gchForward);
gpuErrchk(hipFree(gpfCalibrationMask));
gpuErrchk(hipFree(gpcProcessSpectrum));
hipfftDestroy(gchReverse);
gpuErrchk(hipFree(gpfProcessPhase));
hipFree(gpfLeftPhase);
hipFree(gpfRightPhase);
hipFree(gpfKLineCoefficients);
hipFree(gpfProcessK);
hipFree(gpnProcessIndex);
hipFree(gpnProcessAssigned);
hipFree(gpfProcessSpectrumK);
// hipFree(gpfProcessOCT);
hipHostFree(gpcProcessedOCT);
gpuErrchk(hipFree(gpfDispersionMask));
gpuErrchk(hipFree(gpcDispersionCorrection));
hipfftDestroy(gchForwardComplex);
hipFree(gpcProcessKCorrected);
gnAllocationStatus = 0;
} // if (gnAllocationStatus
return -1;
}
int getReferenceData(int nMode, short* pnReferenceParallel, short* pnReferencePerpendicular, bool bIsReferenceRecorded) {
// copy parameters to global parameters
gbIsReferenceRecorded = bIsReferenceRecorded;
if (bIsReferenceRecorded == true) {
switch (nMode) {
case 0: // SD-OCT
// data type conversion (on host)
float* pfReference;
pfReference = (float*)malloc(gnRawLineLength * sizeof(float));
            for (int i = 0; i < gnRawLineLength; i++) {
pfReference[i] = (float)pnReferenceParallel[i];
}
// copy data to device
            gpuErrchk(hipMemcpy(gpfReference, pfReference, gnRawLineLength * sizeof(float), hipMemcpyHostToDevice)); // sizeof(float): the buffers hold floats
gpuErrchk(hipDeviceSynchronize());
free(pfReference);
gbIsReferenceLoaded = true;
break;
case 1: // PS SD-OCT
// data type conversion (on host)
float* pfReferenceParallel, * pfReferencePerpendicular;
pfReferenceParallel = (float*)malloc(gnRawLineLength * sizeof(float));
pfReferencePerpendicular = (float*)malloc(gnRawLineLength * sizeof(float));
            for (int i = 0; i < gnRawLineLength; i++) {
pfReferenceParallel[i] = (float)pnReferenceParallel[i];
pfReferencePerpendicular[i] = (float)pnReferencePerpendicular[i];
}
// copy data to device
            gpuErrchk(hipMemcpy(gpfReferenceParallel, pfReferenceParallel, gnRawLineLength * sizeof(float), hipMemcpyHostToDevice)); // sizeof(float): the buffers hold floats
            gpuErrchk(hipMemcpy(gpfReferencePerpendicular, pfReferencePerpendicular, gnRawLineLength * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipDeviceSynchronize());
free(pfReferenceParallel);
free(pfReferencePerpendicular);
gbIsReferenceLoaded = true;
break;
case 2: // line field
break;
case 3: // OFDI
break;
case 4: // PS OFDI
break;
} // switch (nMode)
}
return -1;
}
int getCalibrationData() {
return -1;
}
int getDataSDOCT(void* pnIMAQ) {
return -1;
}
int getDataPSSDOCT(void* pnIMAQParallel, void* pnIMAQPerpendicular) {
// copy data to device
gpuErrchk(hipMemcpy(d_gpnRawIMAQParallel, pnIMAQParallel, (gnRawLineLength * gnRawNumberLines) * sizeof(short), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_gpnRawIMAQPerpendicular, pnIMAQPerpendicular, (gnRawLineLength * gnRawNumberLines) * sizeof(short), hipMemcpyHostToDevice));
gpuErrchk(hipDeviceSynchronize());
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
// convert to float type
d3Threads.x = 512; d3Threads.y = 1; d3Threads.z = 1;
d3Blocks.x = (gnRawLineLength * gnRawNumberLines - 1) / d3Threads.x + 1;
d3Blocks.y = 1; d3Blocks.z = 1;
convert2Float << <d3Blocks, d3Threads >> > (d_gpnRawIMAQParallel, d_gpfRawIMAQParallel, gnRawLineLength * gnRawNumberLines);
gpuErrchk(hipPeekAtLastError());
convert2Float << <d3Blocks, d3Threads >> > (d_gpnRawIMAQPerpendicular, d_gpfRawIMAQPerpendicular, gnRawLineLength * gnRawNumberLines);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
return -1;
}
int calculateSpectralDomainCalibration(int nMode) { // can be used in both SD-OCT and PS SD-OCT, not in DLL
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
int nActualProcessNumberLines;
switch (nMode) {
case 0: // SD-OCT
nActualProcessNumberLines = gnProcessNumberLines;
break;
case 1: // PS SD-OCT
nActualProcessNumberLines = gnProcessNumberLines >> 1;
break;
}
/********** calibration ************/
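    /* pipeline: forward FFT of the fringes -> band-pass mask around the calibration peak ->
       inverse FFT to an analytic (complex) spectrum -> phase extraction, unwrapping and matching ->
       linear fit between the edge phases -> per-line k values and resampling indices
       (gpfProcessK / gpnProcessIndex) that the interpolation step uses for k-linearization */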
/* forward fft */
gpuErrchk(hipMemset2D(gpcProcessDepthProfile, gnProcessDepthProfilePitch, 0.0, gnProcessDepthProfilePitch, nActualProcessNumberLines));
hipfftExecR2C(gchForward, gpfIMAQPitched, gpcProcessDepthProfile);
/* mask */
// calculate mask: QUESTION can be done in CPU in the initialize function? (small data size, avoid warp divergence)
nThreadsPerBlock = 512;
calculateMask << <gnRawLineLength / nThreadsPerBlock, nThreadsPerBlock >> > (gpfCalibrationMask, gnRawLineLength, 50, 100, 16); // grab these numbers from C# UI
// apply mask
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
applyMask << <d3Blocks, d3Threads >> > (gpcProcessDepthProfile, gpfCalibrationMask, nActualProcessNumberLines, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
/* reverse fft */
hipfftExecC2C(gchReverse, gpcProcessDepthProfile, gpcProcessSpectrum, HIPFFT_BACKWARD);
/* calculate phase */
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = nActualProcessNumberLines / d3Threads.y;
d3Blocks.z = 1;
calculatePhase << <d3Blocks, d3Threads >> > (gpcProcessSpectrum, gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
d3Threads.x = 256;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
unwrapPhase << <d3Blocks, d3Threads >> > (gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength, gfPiEps, gf2Pi);
d3Threads.x = 256;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
matchPhase << <d3Blocks, d3Threads >> > (gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength, gf2Pi);
nThreadsPerBlock = 32;
getPhaseLimits << <2, nThreadsPerBlock >> > (gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength, 32, gnRawLineLength - 32, gpfLeftPhase, gpfRightPhase);
gnKMode = 1;
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
hipMemset2D(gpnProcessAssigned, gnAssignedPitch, 0, gnRawLineLength * sizeof(int), nActualProcessNumberLines);
calculateK << <d3Blocks, d3Threads >> > (gpfProcessPhase, gpfProcessK, gpnProcessAssigned, gpnProcessIndex, nActualProcessNumberLines, gnRawLineLength, \
gpfKLineCoefficients, 32, gnRawLineLength - 32, gpfLeftPhase, gpfRightPhase, gnKMode);
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
cleanIndex << <d3Blocks, d3Threads >> > (gpfProcessK, gpnProcessIndex, gpnProcessAssigned, nActualProcessNumberLines, gnRawLineLength);
gpuErrchk(hipDeviceSynchronize());
return -1;
}
int outputCalibrationPSSDOCT(void* pnIMAQParallel, void* pnIMAQPerpendicular, float* pfKParallel, float* pfKPerpendicular, int* pnIndexParallel, int* pnIndexPerpendicular) {
// output pfK, pnIndex and return to C# and save
// get a frame of data
getDataPSSDOCT(pnIMAQParallel, pnIMAQPerpendicular);
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
int nActualProcessNumberLines = gnProcessNumberLines >> 1; // half of a chunk
    // output buffers (pfK*, pnIndex*) are assumed to be pre-allocated by the caller (C#);
    // allocating them locally here would overwrite the caller's pointers, leak, and never return data
// loop through cameras
for (int nCam = 0; nCam < 2; nCam++) { // nCam = 0: parallel camera; nCam = 1: perpendicular camera
/* copy data */
switch (nCam) {
case 0: // parallel camera
gpuErrchk(hipMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQParallel, gnIMAQPitch, gnIMAQPitch, nActualProcessNumberLines, hipMemcpyDeviceToDevice));
break;
case 1: // perpendicular camera
gpuErrchk(hipMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQPerpendicular, gnIMAQPitch, gnIMAQPitch, nActualProcessNumberLines, hipMemcpyDeviceToDevice));
break;
}
gpuErrchk(hipDeviceSynchronize());
/* reference */
if (gbIsReferenceRecorded == false) { // no reference data recorded
// calculate reference
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nActualProcessNumberLines, gnRawLineLength);
break;
case 1: // perpendicular camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nActualProcessNumberLines, gnRawLineLength);
break;
}
gpuErrchk(hipPeekAtLastError());
} // if (gbIsReferenceRecorded == false)
// subtract reference
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nActualProcessNumberLines, gnRawLineLength);
break;
case 1: // perpendicular camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nActualProcessNumberLines, gnRawLineLength);
break;
}
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
/* calibration */
calculateSpectralDomainCalibration(1); // nMode = 1: PS SD-OCT
// gpfProcessK, gpnProcessIndex, gpnProcessAssigned
// output calibration parameters
switch (nCam) {
case 0: // parallel camera
gpuErrchk(hipMemcpy2D(gpfProcessKParallel, gnKPitchParallel, gpfProcessK, gnKPitch, gnKPitch, nActualProcessNumberLines, hipMemcpyDeviceToDevice)); // QUESTION: ok if only copy one line?
gpuErrchk(hipMemcpy2D(gpnProcessIndexParallel, gnIndexPitchParallel, gpnProcessIndex, gnIndexPitch, gnIndexPitch, nActualProcessNumberLines, hipMemcpyDeviceToDevice));
break;
case 1: // perpendicular camera
gpuErrchk(hipMemcpy2D(gpfProcessKPerpendicular, gnKPitchPerpendicular, gpfProcessK, gnKPitch, gnKPitch, nActualProcessNumberLines, hipMemcpyDeviceToDevice)); // QUESTION: ok if only copy one line?
gpuErrchk(hipMemcpy2D(gpnProcessIndexPerpendicular, gnIndexPitchPerpendicular, gpnProcessIndex, gnIndexPitch, gnIndexPitch, nActualProcessNumberLines, hipMemcpyDeviceToDevice));
} // switch (nCam)
gpuErrchk(hipDeviceSynchronize());
} // for (int nCam = 0; nCam < 2; nCam++)
// copy data to host for output
gpuErrchk(hipMemcpy(pfKParallel, gpfProcessKParallel, gnRawLineLength * nActualProcessNumberLines * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(pfKPerpendicular, gpfProcessKPerpendicular, gnRawLineLength * nActualProcessNumberLines * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(pnIndexParallel, gpnProcessIndexParallel, gnRawLineLength * nActualProcessNumberLines * sizeof(int), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(pnIndexPerpendicular, gpnProcessIndexPerpendicular, gnRawLineLength * nActualProcessNumberLines * sizeof(int), hipMemcpyDeviceToHost));
gpuErrchk(hipDeviceSynchronize());
return -1;
}
int processSDOCT() {
return -1;
}
int processPSSDOCT() {
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
// loop through cameras
for (int nCam = 0; nCam < 2; nCam++) { // nCam = 0: parallel camera; nCam = 1: perpendicular camera
int nNumberLinesPerChunk = gnProcessNumberLines; // value set in C# UI
int nNumberChunks = (gnRawNumberLines - 1) / gnProcessNumberLines + 1; // QUESTION: need to double check. why previous method?
// loop through chunks
for (int nChunk = 0; nChunk < nNumberChunks; nChunk++) {
// loop through even and odd lines, respectively
int nSrcPtrOffset = nChunk * (gnRawLineLength * nNumberLinesPerChunk);
for (int nOddEven = 0; nOddEven < 2; nOddEven++) { // nOddEven = 0: process even lines; nOddEven = 1; process odd lines
// copy a chunk: in each data array (on device now), copy every other line
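                    // deinterleave trick: a source pitch of 2 * gnIMAQPitch makes hipMemcpy2D step two
                    // lines per copied row, so only every other line is taken; offsetting the source by
                    // gnRawLineLength (one line) selects the odd lines instead of the even ones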
switch (nCam) {
case 0: // parallel camera
switch (nOddEven) {
case 0: // even lines
gpuErrchk(hipMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQParallel + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, hipMemcpyDeviceToDevice));
break;
case 1: // odd lines
gpuErrchk(hipMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQParallel + gnRawLineLength + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, hipMemcpyDeviceToDevice));
break;
} // switch (nOddEven)
break;
case 1: // perpendicular camera
switch (nOddEven) {
case 0: // even lines
gpuErrchk(hipMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQPerpendicular + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, hipMemcpyDeviceToDevice));
break;
case 1: // odd lines
gpuErrchk(hipMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQPerpendicular + gnRawLineLength + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, hipMemcpyDeviceToDevice));
break;
} // switch (nOddEven)
break;
}
gpuErrchk(hipDeviceSynchronize());
/* reference */
if (gbIsReferenceRecorded == false) { // no reference data recorded
// calculate reference
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
case 1: // perpendicular camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
}
gpuErrchk(hipPeekAtLastError());
} // if (gbIsReferenceRecorded == false)
// subtract reference
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
case 1: // perpendicular camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
}
// now gpfIMAQPitched is the result of reference subtraction (fringes), SAME AS gpfProcessOCT
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
/* calibration */
if (gbIsCalibrationLoaded == false) {
calculateSpectralDomainCalibration(1); // nMode = 1: PS SD-OCT
// gpfProcessK, gpnProcessIndex, gpnProcessAssigned
}
/* interpolation */
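                /* resample each fringe from the detector's wavelength sampling onto a uniform wavenumber
                   (k) grid using the per-line k map and index table from calibration
                   (gpfProcessK / gpnProcessIndex); the k-linearized spectra land in gpfProcessSpectrumK */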
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = (nNumberLinesPerChunk >> 1) / d3Threads.y;
d3Blocks.z = 1;
interpCubicSpline << <d3Blocks, d3Threads >> > (gpfProcessK, gpnProcessIndex, gpfIMAQPitched, gpfProcessSpectrumK, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(hipDeviceSynchronize());
/* forward fft */
gpuErrchk(hipMemset2D(gpcProcessDepthProfile, gnProcessDepthProfilePitch, 0.0, gnProcessDepthProfilePitch, nNumberLinesPerChunk >> 1));
gpuErrchk(hipDeviceSynchronize());
hipfftExecR2C(gchForward, gpfProcessSpectrumK, gpcProcessDepthProfile);
/* mask */
// calculate mask
nThreadsPerBlock = 512;
calculateMask << <gnRawLineLength / nThreadsPerBlock, nThreadsPerBlock >> > (gpfDispersionMask, gnRawLineLength, 50, 100, 16); // need to get values from C# UI
// apply mask
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
applyMask << <d3Blocks, d3Threads >> > (gpcProcessDepthProfile, gpfDispersionMask, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
/* reverse fft */
hipfftExecC2C(gchReverse, gpcProcessDepthProfile, gpcProcessSpectrum, HIPFFT_BACKWARD);
/* calculate phase */
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = (nNumberLinesPerChunk >> 1) / d3Threads.y;
d3Blocks.z = 1;
calculatePhase << <d3Blocks, d3Threads >> > (gpcProcessSpectrum, gpfProcessPhase, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
/* dispersion correction */
// calculate dispersion correction
nThreadsPerBlock = 512;
calculateDispersionCorrection << <gnRawLineLength / nThreadsPerBlock, nThreadsPerBlock >> > (gpfProcessPhase, gpcDispersionCorrection);
// apply correction
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
applyDispersionCorrection << <d3Blocks, d3Threads >> > (gpfProcessSpectrumK, gpcDispersionCorrection, gpcProcessKCorrected, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
/* forward fft */
hipfftExecC2C(gchForwardComplex, gpcProcessKCorrected, gpcProcessDepthProfile, HIPFFT_FORWARD);
gpuErrchk(hipDeviceSynchronize());
// copy to results array
switch (nCam) {
case 0: // parallel camera
switch (nOddEven) {
case 0: // even lines
break;
case 1: // odd lines
break;
}
break;
case 1: // perpendicular camera
switch (nOddEven) {
case 0: // even lines
break;
case 1: // odd lines
break;
}
break;
}
} // for (int nOddEven = 0; nOddEven < 2; nOddEven++)
} // for (int nChunk = 0; nChunk < nNumberChunks; nChunk++)
} // for (int nCam = 0; nCam < 2; nCam++)
return -1;
}
| 3c77807621afe650e22741f6f2452927db98c6ce.cu |
#include "nOCTcudaDLLHeader.cuh"
// status and control parameters
static int gnMode = -1;
static int gnAllocationStatus = 0;
static bool gbIsReferenceRecorded = false;
static bool gbIsReferenceLoaded = false;
static bool gbIsCalibrationLoaded = false;
static bool gbIsDispersionLoaded = false;
static int gnRawLineLength;
static int gnRawNumberLines; // number of lines in a frame
// static int gnCalibrationNumberLines;
static int gnProcessNumberLines; // number of lines in a chunk
static int gnProcessedNumberLines;
static int gnPerpendicular;
static int gnMidLength;
/* raw spectra arrays */
// common
static short* d_gpnRawIMAQ; // device: raw spectra from camera
static float* d_gpfRawIMAQ; // device: raw spectra (gpfRawCalibration)
static float* gpfIMAQPitched; // device: raw spectra copied to pitched memory (gpfProcessCalibration)
static size_t gnIMAQPitch; // gnProcessCalibrationPitch
// PS-SD-OCT
static short* d_gpnRawIMAQParallel; // device: raw spectra from camera
static float* d_gpfRawIMAQParallel; // device: raw spectra (gpfRawCalibration)
static short* d_gpnRawIMAQPerpendicular; // device: raw spectra from camera
static float* d_gpfRawIMAQPerpendicular; // device: raw spectra (gpfRawCalibration)
/* reference */
// common
static float* gpfReference;
// PS-SD-OCT
static float* gpfReferenceParallel;
static float* gpfReferencePerpendicular;
/* fft */
static cufftHandle gchForward;
static cufftComplex* gpcProcessDepthProfile;
static size_t gnProcessDepthProfilePitch;
// calibration mask
static int gnCalibrationStart;
static int gnCalibrationStop;
static int gnCalibrationRound;
static float* gpfCalibrationMask;
// reverse fft
static cufftComplex* gpcProcessSpectrum;
static size_t gnProcessSpectrumPitch;
static cufftHandle gchReverse;
// phase
static float* gpfProcessPhase;
static size_t gnProcessPhasePitch;
// unwrap
static float gfPiEps = (float)(acos(-1.0) - 1.0e-30);
static float gf2Pi = (float)(2.0 * acos(-1.0));
/* linear fit and interpolation */
// common
static float* gpfLeftPhase;
static float* gpfRightPhase;
static float* gpfKLineCoefficients;
static float* gpfProcessK;
static size_t gnKPitch;
static int* gpnProcessIndex;
static size_t gnIndexPitch;
static int* gpnProcessAssigned;
static size_t gnAssignedPitch;
static int gnKMode;
static float* gpfProcessSpectrumK;
static size_t gnSpectrumKPitch;
// PS-SD-OCT
static float* gpfProcessKParallel;
static float* gpfProcessKPerpendicular;
static int* gpnProcessIndexParallel;
static int* gpnProcessIndexPerpendicular;
static size_t gnKPitchParallel;
static size_t gnKPitchPerpendicular;
static size_t gnIndexPitchParallel;
static size_t gnIndexPitchPerpendicular;
// dispersion mask
static int gnDispersionStart;
static int gnDispersionStop;
static int gnDispersionRound;
static float* gpfDispersionMask;
// dispersion correction
static cufftComplex* gpcDispersionCorrection;
static cufftHandle gchForwardComplex;
static cufftComplex* gpcProcessKCorrected;
static size_t gnKCorrectedPitch;
// static float* gpfProcessOCT;
static size_t gnProcessOCTPitch;
static cufftComplex* gpcProcessedOCT;
// PS-SD-OCT
static cufftComplex* gpcProcessedOCTParallelOdd;
static cufftComplex* gpcProcessedOCTParallelEven;
static cufftComplex* gpcProcessedOCTPerpendicularOdd;
static cufftComplex* gpcProcessedOCTPerpendicularEven;
int getDeviceCount(int* nNumberDevices) {
// check for GPU
int nDevices = -1;
int nRet = cudaGetDeviceCount(&nDevices);
if (nRet == cudaSuccess)
*(nNumberDevices) = nDevices;
return nRet;
}
int getDeviceName(int nDeviceNumber, char* strDeviceName) {
// check for GPU
cudaDeviceProp currentDevice;
int nRet = cudaGetDeviceProperties(¤tDevice, nDeviceNumber);
if (nRet == cudaSuccess) {
sprintf(strDeviceName, "%s (%d SMs, %d b/s, %d t/b, %d t/s, %d shared kB, %d GB)",
currentDevice.name,
currentDevice.multiProcessorCount,
currentDevice.maxBlocksPerMultiProcessor,
currentDevice.maxThreadsPerBlock,
currentDevice.maxThreadsPerMultiProcessor,
currentDevice.sharedMemPerBlock / 1024,
currentDevice.totalGlobalMem / 1024 / 1024 / 1024);
} // if (nRet
return nRet;
}
int checkStatus() {
return -1;
}
int initialize(int nMode, int nRawLineLength, int nRawNumberLines, int nProcessNumberLines, int nProcessedNumberLines) {
cleanup(nMode);
// copy parameters to global parameters
gnMode = nMode;
gnRawLineLength = nRawLineLength;
gnRawNumberLines = nRawNumberLines; // number of lines in a frame
gnProcessNumberLines = nProcessNumberLines; // number of lines in a chunk
gnProcessedNumberLines = nProcessedNumberLines;
    gnMidLength = (int)(gnRawLineLength / 2 + 1); // set before the switch: the gnMidLength-sized allocations below depend on it
    int nActualProcessNumberLines;
// allocate memory
switch (nMode) {
case 0: // SD-OCT
gnPerpendicular = 0;
// gnCalibrationNumberLines = 1;
nActualProcessNumberLines = gnProcessNumberLines;
// gpuErrchk(cudaMallocHost((void**)&h_gpnRawIMAQ, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(cudaMalloc((void**)&d_gpnRawIMAQ, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(cudaMalloc((void**)&d_gpfRawIMAQ, (gnRawLineLength * gnRawNumberLines) * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&gpfReference, gnRawLineLength * sizeof(float)));
break;
case 1: // PS SD-OCT
gnPerpendicular = 1;
// gnCalibrationNumberLines = gnRawNumberLines; // QUESTION: what is this parameter?
nActualProcessNumberLines = gnProcessNumberLines >> 1; // only process every other line in each array
gpuErrchk(cudaMalloc((void**)&d_gpnRawIMAQParallel, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(cudaMalloc((void**)&d_gpfRawIMAQParallel, (gnRawLineLength * gnRawNumberLines) * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&d_gpnRawIMAQPerpendicular, (gnRawLineLength * gnRawNumberLines) * sizeof(short)));
gpuErrchk(cudaMalloc((void**)&d_gpfRawIMAQPerpendicular, (gnRawLineLength * gnRawNumberLines) * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&gpfReferenceParallel, gnRawLineLength * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&gpfReferencePerpendicular, gnRawLineLength * sizeof(float)));
gpuErrchk(cudaMallocPitch((void**)&gpfProcessKParallel, &gnKPitchParallel, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(cudaMallocPitch((void**)&gpfProcessKPerpendicular, &gnKPitchPerpendicular, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(cudaMallocPitch((void**)&gpnProcessIndexParallel, &gnIndexPitchParallel, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(cudaMallocPitch((void**)&gpnProcessIndexPerpendicular, &gnIndexPitchPerpendicular, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(cudaMalloc((void**)&gpcProcessedOCTParallelOdd, (gnMidLength * gnProcessedNumberLines) * sizeof(cufftComplex)));
gpuErrchk(cudaMalloc((void**)&gpcProcessedOCTParallelEven, (gnMidLength * gnProcessedNumberLines) * sizeof(cufftComplex)));
gpuErrchk(cudaMalloc((void**)&gpcProcessedOCTPerpendicularOdd, (gnMidLength * gnProcessedNumberLines) * sizeof(cufftComplex)));
gpuErrchk(cudaMalloc((void**)&gpcProcessedOCTPerpendicularEven, (gnMidLength * gnProcessedNumberLines) * sizeof(cufftComplex)));
break;
case 2: // line field
gnPerpendicular = 0;
// gnCalibrationNumberLines = 1;
break;
case 3: // OFDI
gnPerpendicular = 0;
// gnCalibrationNumberLines = gnRawNumberLines;
break;
case 4: // PS OFDI
gnPerpendicular = 1;
// gnCalibrationNumberLines = gnRawNumberLines;
break;
} // switch (nMode)
gpuErrchk(cudaMallocPitch((void**)&gpfIMAQPitched, &gnIMAQPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gnMidLength = (int)(gnRawLineLength / 2 + 1);
gpuErrchk(cudaMallocPitch((void**)&gpcProcessDepthProfile, &gnProcessDepthProfilePitch, gnRawLineLength * sizeof(cufftComplex), nActualProcessNumberLines));
int nRank = 1;
int pn[] = { gnRawLineLength };
int nIStride = 1, nOStride = 1;
int nIDist = gnIMAQPitch / sizeof(float);
int nODist = gnProcessDepthProfilePitch / sizeof(cufftComplex);
int pnINEmbed[] = { 0 };
int pnONEmbed[] = { 0 };
int nBatch = gnProcessNumberLines >> 1;
cufftPlanMany(&gchForward, nRank, pn, pnINEmbed, nIStride, nIDist, pnONEmbed, nOStride, nODist, CUFFT_R2C, nBatch);
gpuErrchk(cudaMalloc((void**)&gpfCalibrationMask, gnRawLineLength * sizeof(float)));
gpuErrchk(cudaMallocPitch((void**)&gpcProcessSpectrum, &gnProcessSpectrumPitch, gnRawLineLength * sizeof(cufftComplex), nActualProcessNumberLines));
nIDist = gnProcessDepthProfilePitch / sizeof(cufftComplex);
nODist = gnProcessSpectrumPitch / sizeof(cufftComplex);
cufftPlanMany(&gchReverse, nRank, pn, pnINEmbed, nIStride, nIDist, pnONEmbed, nOStride, nODist, CUFFT_C2C, nBatch);
gpuErrchk(cudaMallocPitch((void**)&gpfProcessPhase, &gnProcessPhasePitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
cudaMalloc((void**)&gpfLeftPhase, sizeof(float));
cudaMalloc((void**)&gpfRightPhase, sizeof(float));
cudaMalloc((void**)&gpfKLineCoefficients, 2 * sizeof(float));
gpuErrchk(cudaMallocPitch((void**)&gpfProcessK, &gnKPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(cudaMallocPitch((void**)&gpnProcessIndex, &gnIndexPitch, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(cudaMallocPitch((void**)&gpnProcessAssigned, &gnAssignedPitch, gnRawLineLength * sizeof(int), nActualProcessNumberLines));
gpuErrchk(cudaMallocPitch((void**)&gpfProcessSpectrumK, &gnSpectrumKPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
// gpuErrchk(cudaMallocPitch((void**)&gpfProcessOCT, &gnProcessOCTPitch, gnRawLineLength * sizeof(float), nActualProcessNumberLines));
gpuErrchk(cudaMallocHost((void**)&gpcProcessedOCT, (gnMidLength * gnProcessedNumberLines) * sizeof(cufftComplex)));
gpuErrchk(cudaMalloc((void**)&gpfDispersionMask, gnRawLineLength * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&gpcDispersionCorrection, gnRawLineLength * sizeof(cufftComplex)));
gpuErrchk(cudaMallocPitch((void**)&gpcProcessKCorrected, &gnKCorrectedPitch, gnRawLineLength * sizeof(cufftComplex), nActualProcessNumberLines));
nIDist = gnKCorrectedPitch / sizeof(cufftComplex);
cufftPlanMany(&gchForwardComplex, nRank, pn, pnINEmbed, nIStride, nIDist, pnONEmbed, nOStride, nODist, CUFFT_C2C, nBatch);
gpuErrchk(cudaDeviceSynchronize()); // QUESTION: will cudaDeviceSynchronize slow down the performance?
gnAllocationStatus = 1;
return -1;
} // int initialize
int cleanup(int nMode) {
// free memory allocations
if (gnAllocationStatus == 1) {
switch (nMode)
{
case 0: // SD-OCT
// gpuErrchk(cudaFreeHost(h_gpnRawIMAQ));
gpuErrchk(cudaFree(d_gpnRawIMAQ));
gpuErrchk(cudaFree(d_gpfRawIMAQ));
gpuErrchk(cudaFree(gpfReference));
break;
case 1: // PS SD-OCT
gpuErrchk(cudaFree(d_gpnRawIMAQParallel));
gpuErrchk(cudaFree(d_gpfRawIMAQParallel));
gpuErrchk(cudaFree(d_gpnRawIMAQPerpendicular));
gpuErrchk(cudaFree(d_gpfRawIMAQPerpendicular));
gpuErrchk(cudaFree(gpfReferenceParallel));
gpuErrchk(cudaFree(gpfReferencePerpendicular));
gpuErrchk(cudaFree(gpfProcessKParallel));
gpuErrchk(cudaFree(gpfProcessKPerpendicular));
gpuErrchk(cudaFree(gpnProcessIndexParallel));
gpuErrchk(cudaFree(gpnProcessIndexPerpendicular));
break;
case 2: // line field
break;
case 3: // OFDI
break;
case 4: // PS OFDI
            break;
}
gpuErrchk(cudaFree(gpfIMAQPitched));
gpuErrchk(cudaFree(gpcProcessDepthProfile));
cufftDestroy(gchForward);
gpuErrchk(cudaFree(gpfCalibrationMask));
gpuErrchk(cudaFree(gpcProcessSpectrum));
cufftDestroy(gchReverse);
gpuErrchk(cudaFree(gpfProcessPhase));
cudaFree(gpfLeftPhase);
cudaFree(gpfRightPhase);
cudaFree(gpfKLineCoefficients);
cudaFree(gpfProcessK);
cudaFree(gpnProcessIndex);
cudaFree(gpnProcessAssigned);
cudaFree(gpfProcessSpectrumK);
// cudaFree(gpfProcessOCT);
cudaFreeHost(gpcProcessedOCT);
gpuErrchk(cudaFree(gpfDispersionMask));
gpuErrchk(cudaFree(gpcDispersionCorrection));
cufftDestroy(gchForwardComplex);
cudaFree(gpcProcessKCorrected);
gnAllocationStatus = 0;
} // if (gnAllocationStatus
return -1;
}
int getReferenceData(int nMode, short* pnReferenceParallel, short* pnReferencePerpendicular, bool bIsReferenceRecorded) {
// copy parameters to global parameters
gbIsReferenceRecorded = bIsReferenceRecorded;
if (bIsReferenceRecorded == true) {
switch (nMode) {
case 0: // SD-OCT
// data type conversion (on host)
float* pfReference;
pfReference = (float*)malloc(gnRawLineLength * sizeof(float));
            for (int i = 0; i < gnRawLineLength; i++) {
pfReference[i] = (float)pnReferenceParallel[i];
}
// copy data to device
            gpuErrchk(cudaMemcpy(gpfReference, pfReference, gnRawLineLength * sizeof(float), cudaMemcpyHostToDevice)); // sizeof(float): the buffers hold floats
gpuErrchk(cudaDeviceSynchronize());
free(pfReference);
gbIsReferenceLoaded = true;
break;
case 1: // PS SD-OCT
// data type conversion (on host)
float* pfReferenceParallel, * pfReferencePerpendicular;
pfReferenceParallel = (float*)malloc(gnRawLineLength * sizeof(float));
pfReferencePerpendicular = (float*)malloc(gnRawLineLength * sizeof(float));
            for (int i = 0; i < gnRawLineLength; i++) {
pfReferenceParallel[i] = (float)pnReferenceParallel[i];
pfReferencePerpendicular[i] = (float)pnReferencePerpendicular[i];
}
// copy data to device
            gpuErrchk(cudaMemcpy(gpfReferenceParallel, pfReferenceParallel, gnRawLineLength * sizeof(float), cudaMemcpyHostToDevice)); // sizeof(float): the buffers hold floats
            gpuErrchk(cudaMemcpy(gpfReferencePerpendicular, pfReferencePerpendicular, gnRawLineLength * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaDeviceSynchronize());
free(pfReferenceParallel);
free(pfReferencePerpendicular);
gbIsReferenceLoaded = true;
break;
case 2: // line field
break;
case 3: // OFDI
break;
case 4: // PS OFDI
break;
} // switch (nMode)
}
return -1;
}
int getCalibrationData() {
return -1;
}
int getDataSDOCT(void* pnIMAQ) {
return -1;
}
int getDataPSSDOCT(void* pnIMAQParallel, void* pnIMAQPerpendicular) {
// copy data to device
gpuErrchk(cudaMemcpy(d_gpnRawIMAQParallel, pnIMAQParallel, (gnRawLineLength * gnRawNumberLines) * sizeof(short), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_gpnRawIMAQPerpendicular, pnIMAQPerpendicular, (gnRawLineLength * gnRawNumberLines) * sizeof(short), cudaMemcpyHostToDevice));
gpuErrchk(cudaDeviceSynchronize());
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
// convert to float type
d3Threads.x = 512; d3Threads.y = 1; d3Threads.z = 1;
d3Blocks.x = (gnRawLineLength * gnRawNumberLines - 1) / d3Threads.x + 1;
d3Blocks.y = 1; d3Blocks.z = 1;
convert2Float << <d3Blocks, d3Threads >> > (d_gpnRawIMAQParallel, d_gpfRawIMAQParallel, gnRawLineLength * gnRawNumberLines);
gpuErrchk(cudaPeekAtLastError());
convert2Float << <d3Blocks, d3Threads >> > (d_gpnRawIMAQPerpendicular, d_gpfRawIMAQPerpendicular, gnRawLineLength * gnRawNumberLines);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
return -1;
}
int calculateSpectralDomainCalibration(int nMode) { // can be used in both SD-OCT and PS SD-OCT, not in DLL
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
int nActualProcessNumberLines;
switch (nMode) {
case 0: // SD-OCT
nActualProcessNumberLines = gnProcessNumberLines;
break;
case 1: // PS SD-OCT
nActualProcessNumberLines = gnProcessNumberLines >> 1;
break;
}
/********** calibration ************/
/* forward fft */
gpuErrchk(cudaMemset2D(gpcProcessDepthProfile, gnProcessDepthProfilePitch, 0.0, gnProcessDepthProfilePitch, nActualProcessNumberLines));
cufftExecR2C(gchForward, gpfIMAQPitched, gpcProcessDepthProfile);
/* mask */
// calculate mask: QUESTION can be done in CPU in the initialize function? (small data size, avoid warp divergence)
nThreadsPerBlock = 512;
calculateMask << <gnRawLineLength / nThreadsPerBlock, nThreadsPerBlock >> > (gpfCalibrationMask, gnRawLineLength, 50, 100, 16); // grab these numbers from C# UI
// apply mask
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
applyMask << <d3Blocks, d3Threads >> > (gpcProcessDepthProfile, gpfCalibrationMask, nActualProcessNumberLines, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
/* reverse fft */
cufftExecC2C(gchReverse, gpcProcessDepthProfile, gpcProcessSpectrum, CUFFT_INVERSE);
/* calculate phase */
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = nActualProcessNumberLines / d3Threads.y;
d3Blocks.z = 1;
calculatePhase << <d3Blocks, d3Threads >> > (gpcProcessSpectrum, gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
d3Threads.x = 256;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
unwrapPhase << <d3Blocks, d3Threads >> > (gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength, gfPiEps, gf2Pi);
d3Threads.x = 256;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
matchPhase << <d3Blocks, d3Threads >> > (gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength, gf2Pi);
nThreadsPerBlock = 32;
getPhaseLimits << <2, nThreadsPerBlock >> > (gpfProcessPhase, nActualProcessNumberLines, gnRawLineLength, 32, gnRawLineLength - 32, gpfLeftPhase, gpfRightPhase);
gnKMode = 1;
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
cudaMemset2D(gpnProcessAssigned, gnAssignedPitch, 0, gnRawLineLength * sizeof(int), nActualProcessNumberLines);
calculateK << <d3Blocks, d3Threads >> > (gpfProcessPhase, gpfProcessK, gpnProcessAssigned, gpnProcessIndex, nActualProcessNumberLines, gnRawLineLength, \
gpfKLineCoefficients, 32, gnRawLineLength - 32, gpfLeftPhase, gpfRightPhase, gnKMode);
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nActualProcessNumberLines / d3Threads.y;
d3Blocks.y = 1;
d3Blocks.z = 1;
cleanIndex << <d3Blocks, d3Threads >> > (gpfProcessK, gpnProcessIndex, gpnProcessAssigned, nActualProcessNumberLines, gnRawLineLength);
gpuErrchk(cudaDeviceSynchronize());
return -1;
}
int outputCalibrationPSSDOCT(void* pnIMAQParallel, void* pnIMAQPerpendicular, float* pfKParallel, float* pfKPerpendicular, int* pnIndexParallel, int* pnIndexPerpendicular) {
// output pfK, pnIndex and return to C# and save
// get a frame of data
getDataPSSDOCT(pnIMAQParallel, pnIMAQPerpendicular);
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
int nActualProcessNumberLines = gnProcessNumberLines >> 1; // half of a chunk
    // output buffers (pfK*, pnIndex*) are assumed to be pre-allocated by the caller (C#);
    // allocating them locally here would overwrite the caller's pointers, leak, and never return data
// loop through cameras
for (int nCam = 0; nCam < 2; nCam++) { // nCam = 0: parallel camera; nCam = 1: perpendicular camera
/* copy data */
switch (nCam) {
case 0: // parallel camera
gpuErrchk(cudaMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQParallel, gnIMAQPitch, gnIMAQPitch, nActualProcessNumberLines, cudaMemcpyDeviceToDevice));
break;
case 1: // perpendicular camera
gpuErrchk(cudaMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQPerpendicular, gnIMAQPitch, gnIMAQPitch, nActualProcessNumberLines, cudaMemcpyDeviceToDevice));
break;
}
gpuErrchk(cudaDeviceSynchronize());
/* reference */
if (gbIsReferenceRecorded == false) { // no reference data recorded
// calculate reference
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nActualProcessNumberLines, gnRawLineLength);
break;
case 1: // perpendicular camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nActualProcessNumberLines, gnRawLineLength);
break;
}
gpuErrchk(cudaPeekAtLastError());
} // if (gbIsReferenceRecorded == false)
// subtract reference
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nActualProcessNumberLines, gnRawLineLength);
break;
case 1: // perpendicular camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nActualProcessNumberLines, gnRawLineLength);
break;
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
/* calibration */
calculateSpectralDomainCalibration(1); // nMode = 1: PS SD-OCT
// gpfProcessK, gpnProcessIndex, gpnProcessAssigned
// output calibration parameters
switch (nCam) {
case 0: // parallel camera
gpuErrchk(cudaMemcpy2D(gpfProcessKParallel, gnKPitchParallel, gpfProcessK, gnKPitch, gnKPitch, nActualProcessNumberLines, cudaMemcpyDeviceToDevice)); // QUESTION: ok if only copy one line?
gpuErrchk(cudaMemcpy2D(gpnProcessIndexParallel, gnIndexPitchParallel, gpnProcessIndex, gnIndexPitch, gnIndexPitch, nActualProcessNumberLines, cudaMemcpyDeviceToDevice));
break;
case 1: // perpendicular camera
gpuErrchk(cudaMemcpy2D(gpfProcessKPerpendicular, gnKPitchPerpendicular, gpfProcessK, gnKPitch, gnKPitch, nActualProcessNumberLines, cudaMemcpyDeviceToDevice)); // QUESTION: ok if only copy one line?
gpuErrchk(cudaMemcpy2D(gpnProcessIndexPerpendicular, gnIndexPitchPerpendicular, gpnProcessIndex, gnIndexPitch, gnIndexPitch, nActualProcessNumberLines, cudaMemcpyDeviceToDevice));
} // switch (nCam)
gpuErrchk(cudaDeviceSynchronize());
} // for (int nCam = 0; nCam < 2; nCam++)
// copy data to host for output
gpuErrchk(cudaMemcpy(pfKParallel, gpfProcessKParallel, gnRawLineLength * nActualProcessNumberLines * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(pfKPerpendicular, gpfProcessKPerpendicular, gnRawLineLength * nActualProcessNumberLines * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(pnIndexParallel, gpnProcessIndexParallel, gnRawLineLength * nActualProcessNumberLines * sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(pnIndexPerpendicular, gpnProcessIndexPerpendicular, gnRawLineLength * nActualProcessNumberLines * sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaDeviceSynchronize());
return -1;
}
int processSDOCT() {
return -1;
}
int processPSSDOCT() {
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
// loop through cameras
for (int nCam = 0; nCam < 2; nCam++) { // nCam = 0: parallel camera; nCam = 1: perpendicular camera
int nNumberLinesPerChunk = gnProcessNumberLines; // value set in C# UI
int nNumberChunks = (gnRawNumberLines - 1) / gnProcessNumberLines + 1; // QUESTION: need to double check. why previous method?
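// Note: (gnRawNumberLines - 1) / gnProcessNumberLines + 1 is integer ceiling division, e.g. 1000 raw lines
// with 256-line chunks gives (1000 - 1) / 256 + 1 = 4 chunks, so a partial trailing chunk is still processed.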
// loop through chunks
for (int nChunk = 0; nChunk < nNumberChunks; nChunk++) {
// loop through even and odd lines, respectively
int nSrcPtrOffset = nChunk * (gnRawLineLength * nNumberLinesPerChunk);
for (int nOddEven = 0; nOddEven < 2; nOddEven++) { // nOddEven = 0: process even lines; nOddEven = 1; process odd lines
// copy a chunk: in each data array (on device now), copy every other line
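// The even/odd split works by doubling the source pitch (2 * gnIMAQPitch) so only every other line is read,
// and by offsetting the source pointer by one line (gnRawLineLength elements) to start on the odd lines.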
switch (nCam) {
case 0: // parallel camera
switch (nOddEven) {
case 0: // even lines
gpuErrchk(cudaMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQParallel + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, cudaMemcpyDeviceToDevice));
break;
case 1: // odd lines
gpuErrchk(cudaMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQParallel + gnRawLineLength + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, cudaMemcpyDeviceToDevice));
break;
} // switch (nOddEven)
break;
case 1: // perpendicular camera
switch (nOddEven) {
case 0: // even lines
gpuErrchk(cudaMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQPerpendicular + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, cudaMemcpyDeviceToDevice));
break;
case 1: // odd lines
gpuErrchk(cudaMemcpy2D(gpfIMAQPitched, gnIMAQPitch, d_gpfRawIMAQPerpendicular + gnRawLineLength + nSrcPtrOffset, 2 * gnIMAQPitch, gnIMAQPitch, nNumberLinesPerChunk >> 1, cudaMemcpyDeviceToDevice));
break;
} // switch (nOddEven)
break;
}
gpuErrchk(cudaDeviceSynchronize());
/* reference */
if (gbIsReferenceRecorded == false) { // no reference data recorded
// calculate reference
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
case 1: // perpendicular camera
calculateMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
}
gpuErrchk(cudaPeekAtLastError());
} // if (gbIsReferenceRecorded == false)
// subtract reference
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
// different cameras
switch (nCam) {
case 0: // parallel camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferenceParallel, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
case 1: // perpendicular camera
subtractMean << <d3Blocks, d3Threads >> > (gpfIMAQPitched, gpfReferencePerpendicular, nNumberLinesPerChunk >> 1, gnRawLineLength);
break;
}
// now gpfIMAQPitched is the result of reference subtraction (fringes), SAME AS gpfProcessOCT
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
/* calibration */
if (gbIsCalibrationLoaded == false) {
calculateSpectralDomainCalibration(1); // nMode = 1: PS SD-OCT
// gpfProcessK, gpnProcessIndex, gpnProcessAssigned
}
/* interpolation */
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = (nNumberLinesPerChunk >> 1) / d3Threads.y;
d3Blocks.z = 1;
interpCubicSpline << <d3Blocks, d3Threads >> > (gpfProcessK, gpnProcessIndex, gpfIMAQPitched, gpfProcessSpectrumK, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(cudaDeviceSynchronize());
/* forward fft */
gpuErrchk(cudaMemset2D(gpcProcessDepthProfile, gnProcessDepthProfilePitch, 0.0, gnProcessDepthProfilePitch, nNumberLinesPerChunk >> 1));
gpuErrchk(cudaDeviceSynchronize());
cufftExecR2C(gchForward, gpfProcessSpectrumK, gpcProcessDepthProfile);
/* mask */
// calculate mask
nThreadsPerBlock = 512;
calculateMask << <gnRawLineLength / nThreadsPerBlock, nThreadsPerBlock >> > (gpfDispersionMask, gnRawLineLength, 50, 100, 16); // need to get values from C# UI
// apply mask
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
applyMask << <d3Blocks, d3Threads >> > (gpcProcessDepthProfile, gpfDispersionMask, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
/* reverse fft */
cufftExecC2C(gchReverse, gpcProcessDepthProfile, gpcProcessSpectrum, CUFFT_INVERSE);
/* calculate phase */
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = (nNumberLinesPerChunk >> 1) / d3Threads.y;
d3Blocks.z = 1;
calculatePhase << <d3Blocks, d3Threads >> > (gpcProcessSpectrum, gpfProcessPhase, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
/* dispersion correction */
// calculate dispersion correction
nThreadsPerBlock = 512;
calculateDispersionCorrection << <gnRawLineLength / nThreadsPerBlock, nThreadsPerBlock >> > (gpfProcessPhase, gpcDispersionCorrection);
// apply correction
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = nNumberLinesPerChunk / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
applyDispersionCorrection << <d3Blocks, d3Threads >> > (gpfProcessSpectrumK, gpcDispersionCorrection, gpcProcessKCorrected, nNumberLinesPerChunk >> 1, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
/* forward fft */
cufftExecC2C(gchForwardComplex, gpcProcessKCorrected, gpcProcessDepthProfile, CUFFT_FORWARD);
gpuErrchk(cudaDeviceSynchronize());
// copy to results array
switch (nCam) {
case 0: // parallel camera
switch (nOddEven) {
case 0: // even lines
break;
case 1: // odd lines
break;
}
break;
case 1: // perpendicular camera
switch (nOddEven) {
case 0: // even lines
break;
case 1: // odd lines
break;
}
break;
}
} // for (int nOddEven = 0; nOddEven < 2; nOddEven++)
} // for (int nChunk = 0; nChunk < nNumberChunks; nChunk++)
} // for (int nCam = 0; nCam < 2; nCam++)
return -1;
}
|
5652f7b6e7e5f5b672ea8a51190686ae5c404e97.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include "alsi/cuda/als.h"
#include "alsi/cuda/utils.cuh"
namespace alsi {
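// linear_search is block-cooperative: every thread scans a strided slice of [start, end) and the verdict is
// shared via shared memory, so it must be called by all threads of a block (it contains __syncthreads()).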
__inline__ __device__
bool linear_search(int * start, int * end, int target) {
__shared__ bool ret;
if (threadIdx.x == 0) ret = false;
__syncthreads();
int size = end - start;
for (int i = threadIdx.x; i < size; i += blockDim.x) {
if (start[i] == target) {
ret = true;
}
}
__syncthreads();
return ret;
}
__global__ void bpr_update_kernel(int samples, unsigned int * random_likes,
unsigned int * random_dislikes,
int * itemids, int * userids, int * indptr,
int factors,
float * X, float * Y,
float learning_rate, float reg,
bool verify_negative_samples,
int * stats) {
extern __shared__ float shared_memory[];
float * temp = &shared_memory[0];
int correct = 0, skipped = 0;
for (int i = blockIdx.x; i < samples; i += gridDim.x) {
int liked_index = random_likes[i] % samples,
disliked_index = random_dislikes[i] % samples;
int userid = userids[liked_index],
likedid = itemids[liked_index],
dislikedid = itemids[disliked_index];
if (verify_negative_samples &&
linear_search(&itemids[indptr[userid]], &itemids[indptr[userid+1]], dislikedid)) {
skipped += 1;
continue;
}
float * user = &X[userid * factors],
* liked = &Y[likedid * factors],
* disliked = &Y[dislikedid * factors];
float user_val = user[threadIdx.x],
liked_val = liked[threadIdx.x],
disliked_val = disliked[threadIdx.x];
temp[threadIdx.x] = liked_val - disliked_val;
float score = dot(user, temp);
float z = 1.0 / (1.0 + exp(score));
if (z < .5) correct++;
liked[threadIdx.x] += learning_rate * ( z * user_val - reg * liked_val);
disliked[threadIdx.x] += learning_rate * (-z * user_val - reg * disliked_val);
if (threadIdx.x < factors){
user[threadIdx.x] += learning_rate * ( z * (liked_val - disliked_val) - reg * user_val);
}
}
if (threadIdx.x == 0) {
atomicAdd(stats, correct);
atomicAdd(stats + 1, skipped);
}
}
#define CHECK_CURAND(code) { checkCurand((code), __FILE__, __LINE__); }
inline void checkCurand(hiprandStatus_t code, const char *file, int line) {
if (code != HIPRAND_STATUS_SUCCESS) {
std::stringstream err;
err << "CURAND error: " << code << " (" << file << ":" << line << ")";
throw std::runtime_error(err.str());
}
}
std::pair<int, int> bpr_update(const CudaVector<int> & userids,
const CudaVector<int> & itemids,
const CudaVector<int> & indptr,
CudaDenseMatrix * X,
CudaDenseMatrix * Y,
float learning_rate, float reg, long seed,
bool verify_negative_samples) {
if (X->cols != Y->cols) throw std::invalid_argument("X and Y should have the same number of columns");
if (userids.size != itemids.size)
throw std::invalid_argument("userids and itemids should have same number of elements");
int nonzeros = userids.size;
int * stats;
CHECK_CUDA(hipMalloc(&stats, sizeof(int) * 2));
CHECK_CUDA(hipMemset(stats, 0, sizeof(int) * 2));
unsigned int * random_likes, * random_dislikes;
CHECK_CUDA(hipMalloc(&random_likes, nonzeros * sizeof(unsigned int)));
CHECK_CUDA(hipMalloc(&random_dislikes, nonzeros * sizeof(unsigned int)));
hiprandGenerator_t rng;
CHECK_CURAND(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(rng, seed));
CHECK_CURAND(hiprandGenerate(rng, random_likes, nonzeros));
CHECK_CURAND(hiprandGenerate(rng, random_dislikes, nonzeros));
int devId;
CHECK_CUDA(hipGetDevice(&devId));
int multiprocessor_count;
CHECK_CUDA(hipDeviceGetAttribute(&multiprocessor_count,
hipDeviceAttributeMultiprocessorCount,
devId));
int factors = X->cols;
int block_count = 128 * multiprocessor_count;
int thread_count = factors;
int shared_memory_size = sizeof(float) * (factors);
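// Launch shape: one thread per latent factor (thread_count == factors, so factors is assumed to fit in a
// single block), each block strides over the sampled triplets by gridDim.x, and the shared buffer holds the
// per-factor (liked - disliked) difference used in the dot product.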
hipLaunchKernelGGL(( bpr_update_kernel), dim3(block_count), dim3(thread_count), shared_memory_size, 0,
nonzeros, random_likes, random_dislikes,
itemids.data, userids.data, indptr.data,
factors,
X->data, Y->data, learning_rate, reg,
verify_negative_samples,
stats);
CHECK_CUDA(hipDeviceSynchronize());
int output[2];
CHECK_CUDA(hipMemcpy(output, stats, 2 * sizeof(int), hipMemcpyDeviceToHost));
CHECK_CUDA(hipFree(random_likes));
CHECK_CUDA(hipFree(random_dislikes));
CHECK_CUDA(hipFree(stats));
return std::make_pair(output[0], output[1]);
}
} | 5652f7b6e7e5f5b672ea8a51190686ae5c404e97.cu | #include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include "alsi/cuda/als.h"
#include "alsi/cuda/utils.cuh"
namespace alsi {
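// linear_search is block-cooperative: every thread scans a strided slice of [start, end) and the verdict is
// shared via shared memory, so it must be called by all threads of a block (it contains __syncthreads()).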
__inline__ __device__
bool linear_search(int * start, int * end, int target) {
__shared__ bool ret;
if (threadIdx.x == 0) ret = false;
__syncthreads();
int size = end - start;
for (int i = threadIdx.x; i < size; i += blockDim.x) {
if (start[i] == target) {
ret = true;
}
}
__syncthreads();
return ret;
}
__global__ void bpr_update_kernel(int samples, unsigned int * random_likes,
unsigned int * random_dislikes,
int * itemids, int * userids, int * indptr,
int factors,
float * X, float * Y,
float learning_rate, float reg,
bool verify_negative_samples,
int * stats) {
extern __shared__ float shared_memory[];
float * temp = &shared_memory[0];
int correct = 0, skipped = 0;
for (int i = blockIdx.x; i < samples; i += gridDim.x) {
int liked_index = random_likes[i] % samples,
disliked_index = random_dislikes[i] % samples;
int userid = userids[liked_index],
likedid = itemids[liked_index],
dislikedid = itemids[disliked_index];
if (verify_negative_samples &&
linear_search(&itemids[indptr[userid]], &itemids[indptr[userid+1]], dislikedid)) {
skipped += 1;
continue;
}
float * user = &X[userid * factors],
* liked = &Y[likedid * factors],
* disliked = &Y[dislikedid * factors];
float user_val = user[threadIdx.x],
liked_val = liked[threadIdx.x],
disliked_val = disliked[threadIdx.x];
temp[threadIdx.x] = liked_val - disliked_val;
float score = dot(user, temp);
float z = 1.0 / (1.0 + exp(score));
if (z < .5) correct++;
liked[threadIdx.x] += learning_rate * ( z * user_val - reg * liked_val);
disliked[threadIdx.x] += learning_rate * (-z * user_val - reg * disliked_val);
if (threadIdx.x < factors){
user[threadIdx.x] += learning_rate * ( z * (liked_val - disliked_val) - reg * user_val);
}
}
if (threadIdx.x == 0) {
atomicAdd(stats, correct);
atomicAdd(stats + 1, skipped);
}
}
#define CHECK_CURAND(code) { checkCurand((code), __FILE__, __LINE__); }
inline void checkCurand(curandStatus_t code, const char *file, int line) {
if (code != CURAND_STATUS_SUCCESS) {
std::stringstream err;
err << "CURAND error: " << code << " (" << file << ":" << line << ")";
throw std::runtime_error(err.str());
}
}
std::pair<int, int> bpr_update(const CudaVector<int> & userids,
const CudaVector<int> & itemids,
const CudaVector<int> & indptr,
CudaDenseMatrix * X,
CudaDenseMatrix * Y,
float learning_rate, float reg, long seed,
bool verify_negative_samples) {
if (X->cols != Y->cols) throw std::invalid_argument("X and Y should have the same number of columns");
if (userids.size != itemids.size)
throw std::invalid_argument("userids and itemids should have same number of elements");
int nonzeros = userids.size;
int * stats;
CHECK_CUDA(cudaMalloc(&stats, sizeof(int) * 2));
CHECK_CUDA(cudaMemset(stats, 0, sizeof(int) * 2));
unsigned int * random_likes, * random_dislikes;
CHECK_CUDA(cudaMalloc(&random_likes, nonzeros * sizeof(unsigned int)));
CHECK_CUDA(cudaMalloc(&random_dislikes, nonzeros * sizeof(unsigned int)));
curandGenerator_t rng;
CHECK_CURAND(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(rng, seed));
CHECK_CURAND(curandGenerate(rng, random_likes, nonzeros));
CHECK_CURAND(curandGenerate(rng, random_dislikes, nonzeros));
int devId;
CHECK_CUDA(cudaGetDevice(&devId));
int multiprocessor_count;
CHECK_CUDA(cudaDeviceGetAttribute(&multiprocessor_count,
cudaDevAttrMultiProcessorCount,
devId));
int factors = X->cols;
int block_count = 128 * multiprocessor_count;
int thread_count = factors;
int shared_memory_size = sizeof(float) * (factors);
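// Launch shape: one thread per latent factor (thread_count == factors, so factors is assumed to fit in a
// single block), each block strides over the sampled triplets by gridDim.x, and the shared buffer holds the
// per-factor (liked - disliked) difference used in the dot product.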
bpr_update_kernel<<<block_count, thread_count, shared_memory_size>>>(
nonzeros, random_likes, random_dislikes,
itemids.data, userids.data, indptr.data,
factors,
X->data, Y->data, learning_rate, reg,
verify_negative_samples,
stats);
CHECK_CUDA(cudaDeviceSynchronize());
int output[2];
CHECK_CUDA(cudaMemcpy(output, stats, 2 * sizeof(int), cudaMemcpyDeviceToHost));
CHECK_CUDA(cudaFree(random_likes));
CHECK_CUDA(cudaFree(random_dislikes));
CHECK_CUDA(cudaFree(stats));
return std::make_pair(output[0], output[1]);
}
} |
571c307172b24767c4f789e77299fac627352d56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "adagrad_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
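// Per-element Adagrad update computed by the kernel below:
// nh[i] = decay * h[i] + g[i] * g[i]
// nw[i] = w[i] + lr * g[i] / (sqrt(nh[i]) + epsilon)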
__global__ void AdagradUpdate(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = nh[i] = decay * h[i] + gi * gi;
nw[i] = w[i] + lr[0] * gi / (std::sqrt(hi) + epsilon);
}
}
template <>
void adagrad_update<CUDAContext>(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr,
CUDAContext* context) {
hipLaunchKernelGGL(( AdagradUpdate),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, w, g, h, nw, nh, epsilon, decay, lr);
}
template <typename SIndex>
__global__ void SparseAdagradKernel(
const size_t N,
const size_t grad_slice_sz,
const float epsilon,
float *param,
float *param_mom,
const SIndex *indices,
const float *grad,
const float *lr)
{
const float LR = lr[0];
CUDA_1D_KERNEL_LOOP(i, N)
{
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
const float mom_new = param_mom[paramIdx] + grad[gradIdx] * grad[gradIdx];
param_mom[paramIdx] = mom_new;
param[paramIdx] += LR * grad[gradIdx] / (sqrt(mom_new) + epsilon);
}
}
template<>
template<typename SIndex>
bool SparseAdagradOp<float, CUDAContext>::DoRunWithType()
{
auto N = Input(GRAD).size();
auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim());
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
hipLaunchKernelGGL(( SparseAdagradKernel<SIndex>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N, grad_slice_sz, epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
Input(LR).template data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Adagrad, AdagradOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SparseAdagrad, SparseAdagradOp<float, CUDAContext>);
}
| 571c307172b24767c4f789e77299fac627352d56.cu | #include "adagrad_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
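// Per-element Adagrad update computed by the kernel below:
// nh[i] = decay * h[i] + g[i] * g[i]
// nw[i] = w[i] + lr * g[i] / (sqrt(nh[i]) + epsilon)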
__global__ void AdagradUpdate(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = nh[i] = decay * h[i] + gi * gi;
nw[i] = w[i] + lr[0] * gi / (std::sqrt(hi) + epsilon);
}
}
template <>
void adagrad_update<CUDAContext>(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr,
CUDAContext* context) {
AdagradUpdate<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, w, g, h, nw, nh, epsilon, decay, lr);
}
template <typename SIndex>
__global__ void SparseAdagradKernel(
const size_t N,
const size_t grad_slice_sz,
const float epsilon,
float *param,
float *param_mom,
const SIndex *indices,
const float *grad,
const float *lr)
{
const float LR = lr[0];
CUDA_1D_KERNEL_LOOP(i, N)
{
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
const float mom_new = param_mom[paramIdx] + grad[gradIdx] * grad[gradIdx];
param_mom[paramIdx] = mom_new;
param[paramIdx] += LR * grad[gradIdx] / (sqrt(mom_new) + epsilon);
}
}
template<>
template<typename SIndex>
bool SparseAdagradOp<float, CUDAContext>::DoRunWithType()
{
auto N = Input(GRAD).size();
auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim());
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
SparseAdagradKernel<SIndex><<<
CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N, grad_slice_sz, epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
Input(LR).template data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Adagrad, AdagradOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SparseAdagrad, SparseAdagradOp<float, CUDAContext>);
}
|
db3f7cb8d5215f120e8ef13eaa448c37d962f64d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipblas.h> // needed for the hipblas* calls below
#include <hipsparse.h> // needed for the hipsparse* calls below
#include <helper_cuda.h>
#include <iostream>
#include "gpu_sparse_operations.h"
using namespace std;
/*
This file contains the following functions:
MatrixVectorMultGPU => y = A * x
VectorAddGPU => w = u + v
VectorDotGPU => c = u * v
*/
void MatrixVectorMultGPU(struct Matrix *d_A, int A_m, int A_n, double *d_x, int x_m, double *d_y) {
/*
This function computes:
y = Ax
MatrixVectorMultGPU takes in 6 parameters:
A - matrix A
A_m - # of rows in A
A_n - # of columns in A
x - vector x
x_m - # of elements in x
y - vector y
*/
if (A_n != x_m) {
cout << "Matrix/Vector sizing error" << endl;
d_y = NULL;
return;
}
hipsparseHandle_t handle;
hipsparseCreate(&handle);
hipsparseMatDescr_t descA;
hipsparseCreateMatDescr(&descA);
const double alpha = 1.0f;
const double beta = 0.0f;
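// Naming note: despite the member names, d_colind holds the CSR row-pointer array (length m + 1) and
// d_rowptr holds the column indices (length nnz) -- see the allocations in ConjugateGradient -- so the
// argument order below does match the csrmv signature (values, row pointers, column indices).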
hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, A_m, A_n, d_A->nnz, &alpha,
descA,
d_A->d_vals,
d_A->d_colind,
d_A->d_rowptr,
d_x,
&beta,
d_y);
hipsparseDestroy(handle);
}
__global__ void VectAdd(double *u, double *v, double a, double *w, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if (i < n) {
w[i] = u[i] + (a*v[i]);
}
}
void VectorAddGPU(double *d_u, double *d_v, double a, double *d_w, int n) {
/*
This function computes:
w = u + (a*v)
VectorAddGPU takes in 5 parameters:
u - vector u
v - vector v
a - double a
w - vector w
n - # of elements in u, v, w
*/
hipLaunchKernelGGL(( VectAdd), dim3((n + 255) / 256), dim3(256), 0, 0, d_u, d_v, a, d_w, n); // size the grid to cover all n elements (a fixed 8 blocks only reaches the first 2048 entries)
}
double VectorDotGPU(double *d_u, double *d_v, int n) {
/*
This function computes:
c = u * v
VectorDotGPU takes 4 parameters:
u - vector u
v - vector v
c - scalar output
n - # of elements in u, v
Usage:
VectorDotGPU(h_u, h_v, &h_c, n);
*/
double *d_c;
double c; // use a stack scalar so nothing has to be freed on the host side
checkCudaErrors(hipMalloc(&d_c, sizeof(double)));
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE);
hipblasDdot(handle, n,
d_u, 1,
d_v, 1,
d_c);
hipblasDestroy(handle);
checkCudaErrors(hipMemcpy(&c, d_c, sizeof(double), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_c)); // release the device scalar so repeated calls do not leak
return c;
}
void ConjugateGradient(struct Matrix *A, int A_m, int A_n, double *b, double *x, int max_iter, double eps) {
/*
This function computes:
Ax = b
ConjugateGradient takes in 6 parameters:
A - matrix A
A_m - number of rows in A (assuming square matrix)
b - vector b
x - vector x, an initial guess
max_iter - maximum number of times to iterate
eps - the tolerance, or very small number that will tell us if it has converged
*/
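// Standard CG recurrences implemented below:
// alpha_k = (r_k . r_k) / (p_k . A p_k)
// x_{k+1} = x_k + alpha_k p_k, r_{k+1} = r_k - alpha_k A p_k
// beta_k = (r_{k+1} . r_{k+1}) / (r_k . r_k), p_{k+1} = r_{k+1} + beta_k p_k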
double residual_old, residual_new, d, alpha, beta;
double *d_x, *d_b, *d_a_p, *d_r_k, *d_p_k;
int res_length = 0;
double *resids = (double *)malloc(sizeof(double) * max_iter);
checkCudaErrors(hipMalloc(&(A->d_colind), (A_m + 1) * sizeof(int)));
checkCudaErrors(hipMalloc(&(A->d_rowptr), A->nnz * sizeof(int)));
checkCudaErrors(hipMalloc(&(A->d_vals), A->nnz * sizeof(double)));
checkCudaErrors(hipMemcpy(A->d_colind, A->h_colind, (A_m + 1) * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(A->d_rowptr, A->h_rowptr, A->nnz * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(A->d_vals, A->h_vals, A->nnz * sizeof(double), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_x, A_m * sizeof(double)));
checkCudaErrors(hipMalloc(&d_b, A_m * sizeof(double)));
checkCudaErrors(hipMalloc(&d_p_k, A_m * sizeof(double)));
checkCudaErrors(hipMalloc(&d_a_p, A_m * sizeof(double)));
checkCudaErrors(hipMalloc(&d_r_k, A_m * sizeof(double)));
checkCudaErrors(hipMemcpy(d_x, x, A_m * sizeof(double), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, b, A_m * sizeof(double), hipMemcpyHostToDevice));
// Calculate initial residual, b-Ax with initial guess
MatrixVectorMultGPU(A, A_m, A_n, d_x, A_m, d_a_p); // ap = Ax
VectorAddGPU(d_b, d_a_p, -1.0, d_r_k, A_m); // r = b - ap
residual_old = VectorDotGPU(d_r_k, d_r_k, A_m); // res_o = dot(r, r)
checkCudaErrors(hipMemcpy(d_p_k, d_r_k, A_m * sizeof(double), hipMemcpyDeviceToDevice)); // p = r
// Iterate until converges or max_iter
for (int i = 0; i < max_iter; i++) { // for i:max_iterations:
MatrixVectorMultGPU(A, A_m, A_n, d_p_k, A_m, d_a_p); // ap = Ap
d = VectorDotGPU(d_p_k, d_a_p, A_m); // d = dot(p, ap)
alpha = residual_old / d; // alpha = res_o / d
VectorAddGPU(d_x, d_p_k, alpha, d_x, A_m); // x = x + (alpha * p)
VectorAddGPU(d_r_k, d_a_p, -alpha, d_r_k, A_m); // r = r - (alpha * ap)
residual_new = VectorDotGPU(d_r_k, d_r_k, A_m); // res_n = dot(r, r)
//printf("Iterations: %i Residual Old: %0.10lf\n", i, sqrt(residual_old));
//printf("Iterations: %i Residual New: %0.10lf\n", i, sqrt(residual_new));
if (sqrt(residual_new) < eps) { // if sqrt(res_n) < eps):
printf("Converged in iterations: %i Residual: %0.10lf\n", i, sqrt(residual_new));
break; // exit
}
beta = residual_new / residual_old; // beta = res_n / res_o
VectorAddGPU(d_r_k, d_p_k, beta, d_p_k, A_m); // p = r + (beta * p)
resids[res_length] = residual_old;
res_length++;
residual_old = residual_new; // res_o = res_n
}
hipMemcpy(x, d_x, A_m * sizeof(double), hipMemcpyDeviceToHost);
//FILE *fp = fopen("residuals.txt", "w+");
//fprintf(fp, "%i\n", res_length);
//for (int i = 0; i < res_length; i++) {
//fprintf(fp, "%0.10lf\n", resids[i]);
//}
//fclose(fp);
hipFree(d_x);
hipFree(d_b);
hipFree(d_a_p);
hipFree(d_r_k);
hipFree(d_p_k);
}
| db3f7cb8d5215f120e8ef13eaa448c37d962f64d.cu | #include <cublas_v2.h>
#include <cusparse_v2.h>
#include <helper_cuda.h>
#include <iostream>
#include "gpu_sparse_operations.h"
using namespace std;
/*
This file contains the following functions:
MatrixVectorMultGPU => y = A * x
VectorAddGPU => w = u + v
VectorDotGPU => c = u * v
*/
void MatrixVectorMultGPU(struct Matrix *d_A, int A_m, int A_n, double *d_x, int x_m, double *d_y) {
/*
This function computes:
y = Ax
MatrixVectorMultGPU takes in 6 parameters:
A - matrix A
A_m - # of rows in A
A_n - # of columns in A
x - vector x
x_m - # of elements in x
y - vector y
*/
if (A_n != x_m) {
cout << "Matrix/Vector sizing error" << endl;
d_y = NULL;
return;
}
cusparseHandle_t handle;
cusparseCreate(&handle);
cusparseMatDescr_t descA;
cusparseCreateMatDescr(&descA);
const double alpha = 1.0f;
const double beta = 0.0f;
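// Naming note: despite the member names, d_colind holds the CSR row-pointer array (length m + 1) and
// d_rowptr holds the column indices (length nnz) -- see the allocations in ConjugateGradient -- so the
// argument order below does match the csrmv signature (values, row pointers, column indices).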
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, A_m, A_n, d_A->nnz, &alpha,
descA,
d_A->d_vals,
d_A->d_colind,
d_A->d_rowptr,
d_x,
&beta,
d_y);
cusparseDestroy(handle);
}
__global__ void VectAdd(double *u, double *v, double a, double *w, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if (i < n) {
w[i] = u[i] + (a*v[i]);
}
}
void VectorAddGPU(double *d_u, double *d_v, double a, double *d_w, int n) {
/*
This function computes:
w = u + (a*v)
VectorAddGPU takes in 5 parameters:
u - vector u
v - vector v
a - double a
w - vector w
n - # of elements in u, v, w
*/
VectAdd<<<(n + 255) / 256, 256>>>(d_u, d_v, a, d_w, n); // size the grid to cover all n elements (a fixed 8 blocks only reaches the first 2048 entries)
}
double VectorDotGPU(double *d_u, double *d_v, int n) {
/*
This function computes:
c = u * v
VectorDotGPU takes 4 parameters:
u - vector u
v - vector v
c - scalar output
n - # of elements in u, v
Usage:
VectorDotGPU(h_u, h_v, &h_c, n);
*/
double *d_c;
double c; // use a stack scalar so nothing has to be freed on the host side
checkCudaErrors(cudaMalloc(&d_c, sizeof(double)));
cublasHandle_t handle;
cublasCreate(&handle);
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
cublasDdot(handle, n,
d_u, 1,
d_v, 1,
d_c);
cublasDestroy(handle);
checkCudaErrors(cudaMemcpy(&c, d_c, sizeof(double), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_c)); // release the device scalar so repeated calls do not leak
return c;
}
void ConjugateGradient(struct Matrix *A, int A_m, int A_n, double *b, double *x, int max_iter, double eps) {
/*
This function computes:
Ax = b
ConjugateGradient takes in 6 parameters:
A - matrix A
A_m - number of rows in A (assuming square matrix)
b - vector b
x - vector x, an initial guess
max_iter - maximum number of times to iterate
eps - the tolerance, or very small number that will tell us if it has converged
*/
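// Standard CG recurrences implemented below:
// alpha_k = (r_k . r_k) / (p_k . A p_k)
// x_{k+1} = x_k + alpha_k p_k, r_{k+1} = r_k - alpha_k A p_k
// beta_k = (r_{k+1} . r_{k+1}) / (r_k . r_k), p_{k+1} = r_{k+1} + beta_k p_k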
double residual_old, residual_new, d, alpha, beta;
double *d_x, *d_b, *d_a_p, *d_r_k, *d_p_k;
int res_length = 0;
double *resids = (double *)malloc(sizeof(double) * max_iter);
checkCudaErrors(cudaMalloc(&(A->d_colind), (A_m + 1) * sizeof(int)));
checkCudaErrors(cudaMalloc(&(A->d_rowptr), A->nnz * sizeof(int)));
checkCudaErrors(cudaMalloc(&(A->d_vals), A->nnz * sizeof(double)));
checkCudaErrors(cudaMemcpy(A->d_colind, A->h_colind, (A_m + 1) * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(A->d_rowptr, A->h_rowptr, A->nnz * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(A->d_vals, A->h_vals, A->nnz * sizeof(double), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_x, A_m * sizeof(double)));
checkCudaErrors(cudaMalloc(&d_b, A_m * sizeof(double)));
checkCudaErrors(cudaMalloc(&d_p_k, A_m * sizeof(double)));
checkCudaErrors(cudaMalloc(&d_a_p, A_m * sizeof(double)));
checkCudaErrors(cudaMalloc(&d_r_k, A_m * sizeof(double)));
checkCudaErrors(cudaMemcpy(d_x, x, A_m * sizeof(double), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, b, A_m * sizeof(double), cudaMemcpyHostToDevice));
// Calculate initial residual, b-Ax with initial guess
MatrixVectorMultGPU(A, A_m, A_n, d_x, A_m, d_a_p); // ap = Ax
VectorAddGPU(d_b, d_a_p, -1.0, d_r_k, A_m); // r = b - ap
residual_old = VectorDotGPU(d_r_k, d_r_k, A_m); // res_o = dot(r, r)
checkCudaErrors(cudaMemcpy(d_p_k, d_r_k, A_m * sizeof(double), cudaMemcpyDeviceToDevice)); // p = r
// Iterate until converges or max_iter
for (int i = 0; i < max_iter; i++) { // for i:max_iterations:
MatrixVectorMultGPU(A, A_m, A_n, d_p_k, A_m, d_a_p); // ap = Ap
d = VectorDotGPU(d_p_k, d_a_p, A_m); // d = dot(p, ap)
alpha = residual_old / d; // alpha = res_o / d
VectorAddGPU(d_x, d_p_k, alpha, d_x, A_m); // x = x + (alpha * p)
VectorAddGPU(d_r_k, d_a_p, -alpha, d_r_k, A_m); // r = r - (alpha * ap)
residual_new = VectorDotGPU(d_r_k, d_r_k, A_m); // res_n = dot(r, r)
//printf("Iterations: %i Residual Old: %0.10lf\n", i, sqrt(residual_old));
//printf("Iterations: %i Residual New: %0.10lf\n", i, sqrt(residual_new));
if (sqrt(residual_new) < eps) { // if sqrt(res_n) < eps):
printf("Converged in iterations: %i Residual: %0.10lf\n", i, sqrt(residual_new));
break; // exit
}
beta = residual_new / residual_old; // beta = res_n / res_o
VectorAddGPU(d_r_k, d_p_k, beta, d_p_k, A_m); // p = r + (beta * p)
resids[res_length] = residual_old;
res_length++;
residual_old = residual_new; // res_o = res_n
}
cudaMemcpy(x, d_x, A_m * sizeof(double), cudaMemcpyDeviceToHost);
//FILE *fp = fopen("residuals.txt", "w+");
//fprintf(fp, "%i\n", res_length);
//for (int i = 0; i < res_length; i++) {
//fprintf(fp, "%0.10lf\n", resids[i]);
//}
//fclose(fp);
cudaFree(d_x);
cudaFree(d_b);
cudaFree(d_a_p);
cudaFree(d_r_k);
cudaFree(d_p_k);
}
|
b0d501cfa7f08f65c180251d9f7f2d1017d87a6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp < (var_2 * var_3)) {
for (int i=0; i < var_1; ++i) {
comp += var_4 / var_5 - -1.6619E-37f / (-1.2160E3f * var_6);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
hipDeviceSynchronize();
return 0;
}
| b0d501cfa7f08f65c180251d9f7f2d1017d87a6a.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp < (var_2 * var_3)) {
for (int i=0; i < var_1; ++i) {
comp += var_4 / var_5 - -1.6619E-37f / (-1.2160E3f * var_6);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
cudaDeviceSynchronize();
return 0;
}
|
bac0facd72d226700755344072304fbbebe03de4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 1000
__global__ void max(int *a , int *c) // kernel function definition
{
int i = threadIdx.x; // initialize i to thread ID
// seed *c from a single thread, then reduce with atomicMax so concurrent threads
// cannot race on *c (this relies on the single-block launch used below)
if (i == 0)
{
*c = a[0];
}
__syncthreads();
atomicMax(c, a[i]);
}
int main()
{
int i;
srand(time(NULL)); //makes use of the computer's internal clock to control the choice of the seed
int a[SIZE];
int c;
int *dev_a, *dev_c; //GPU / device parameters
hipMalloc((void **) &dev_a, SIZE*sizeof(int)); //assign memory to parameters on GPU
hipMalloc((void **) &dev_c, SIZE*sizeof(int));
for( i = 0 ; i < SIZE ; i++)
{
a[i] = i; // rand()% 1000 + 1; // input the numbers
}
hipMemcpy(dev_a , a, SIZE*sizeof(int),hipMemcpyHostToDevice); //copy the array from CPU to GPU
hipLaunchKernelGGL(( max), dim3(1),dim3(SIZE), 0, 0, dev_a,dev_c); // call kernel function with 1 block of SIZE threads
hipMemcpy(&c, dev_c, sizeof(int),hipMemcpyDeviceToHost); // copy the single int result back from GPU to CPU
printf("\nmax = %d ",c);
hipFree(dev_a); // Free the allocated memory
hipFree(dev_c);
printf("");
return 0;
}
| bac0facd72d226700755344072304fbbebe03de4.cu | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#define SIZE 1000
__global__ void max(int *a , int *c) // kernel function definition
{
int i = threadIdx.x; // initialize i to thread ID
// seed *c from a single thread, then reduce with atomicMax so concurrent threads
// cannot race on *c (this relies on the single-block launch used below)
if (i == 0)
{
*c = a[0];
}
__syncthreads();
atomicMax(c, a[i]);
}
int main()
{
int i;
srand(time(NULL)); //makes use of the computer's internal clock to control the choice of the seed
int a[SIZE];
int c;
int *dev_a, *dev_c; //GPU / device parameters
cudaMalloc((void **) &dev_a, SIZE*sizeof(int)); //assign memory to parameters on GPU
cudaMalloc((void **) &dev_c, SIZE*sizeof(int));
for( i = 0 ; i < SIZE ; i++)
{
a[i] = i; // rand()% 1000 + 1; // input the numbers
}
cudaMemcpy(dev_a , a, SIZE*sizeof(int),cudaMemcpyHostToDevice); //copy the array from CPU to GPU
max<<<1,SIZE>>>(dev_a,dev_c); // call kernel function <<<number of blocks, number of threads>>>
cudaMemcpy(&c, dev_c, sizeof(int),cudaMemcpyDeviceToHost); // copy the single int result back from GPU to CPU
printf("\nmax = %d ",c);
cudaFree(dev_a); // Free the allocated memory
cudaFree(dev_c);
printf("");
return 0;
}
|
4335528661754ffad64e4d4fc06e783484e68977.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA BFS kernel
// Topological-Driven: one node per thread, thread_centric,
// no atomic instructions
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
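// The traversal is level-synchronous: vplist stores each vertex's BFS level (MY_INFINITY = unvisited),
// every kernel launch expands exactly one frontier level (vertices with vplist == curr), and the host
// loop keeps relaunching until a pass updates no vertex.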
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist, cudaGraph graph, unsigned curr, bool *changed) {
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (vplist[tid]==curr)
{
uint64_t start, end;
start = graph.get_firstedge_index(tid);
end = graph.get_edge_index_end(tid);
for (uint64_t i=start; i<end; i++)
{
uint64_t vid = graph.get_edge_dest(i);
if (vplist[vid]==MY_INFINITY)
{
*changed=true;
vplist[vid]=curr+1;
}
}
}
}
void cuda_BFS(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If the per-block thread limit is reached,
// split the work into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// initialization
hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
uint32_t zeronum=0;
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
hipMemcpyHostToDevice) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
bool stop;
hipEventRecord(start_event, 0);
int curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = false;
cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( kernel), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph, curr, device_over);
cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) );
curr++;
}while(stop);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( hipFree(device_vpl) );
}
| 4335528661754ffad64e4d4fc06e783484e68977.cu | //=================================================================//
// CUDA BFS kernel
// Topological-Driven: one node per thread, thread_centric,
// no atomic instructions
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
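// The traversal is level-synchronous: vplist stores each vertex's BFS level (MY_INFINITY = unvisited),
// every kernel launch expands exactly one frontier level (vertices with vplist == curr), and the host
// loop keeps relaunching until a pass updates no vertex.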
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist, cudaGraph graph, unsigned curr, bool *changed) {
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (vplist[tid]==curr)
{
uint64_t start, end;
start = graph.get_firstedge_index(tid);
end = graph.get_edge_index_end(tid);
for (uint64_t i=start; i<end; i++)
{
uint64_t vid = graph.get_edge_dest(i);
if (vplist[vid]==MY_INFINITY)
{
*changed=true;
vplist[vid]=curr+1;
}
}
}
}
void cuda_BFS(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
cudaGetDevice(&device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If the per-block thread limit is reached,
// split the work into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) );
cudaEvent_t start_event, stop_event;
cudaErrCheck( cudaEventCreate(&start_event) );
cudaErrCheck( cudaEventCreate(&stop_event) );
// initialization
initialize<<<num_block, num_thread_per_block>>>(device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
uint32_t zeronum=0;
// memcpy from host to device
cudaEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
bool stop;
cudaEventRecord(start_event, 0);
int curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = false;
cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) );
kernel<<<num_block, num_thread_per_block>>>(device_vpl, d_graph, curr, device_over);
cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) );
curr++;
}while(stop);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&kernel_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( cudaFree(device_vpl) );
}
|
16b0955ef9bf632bca234c509d0a062c17f932ee.hip | // !!! This is a file automatically generated by hipify!!!
/*
Implements the sequential cuda vectors.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <petsc/private/vecimpl.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#undef __FUNCT__
#define __FUNCT__ "VecCUDAAllocateCheck"
/*
Allocates space for the vector array on the GPU if it does not exist.
Does NOT change the PetscCUDAFlag for the vector
Does NOT zero the CUDA array
*/
PetscErrorCode VecCUDAAllocateCheck(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
hipStream_t stream;
Vec_CUDA *veccuda;
PetscFunctionBegin;
if (!v->spptr) {
ierr = PetscMalloc(sizeof(Vec_CUDA),&v->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)v->spptr;
err = hipMalloc((void**)&veccuda->GPUarray_allocated,sizeof(PetscScalar)*((PetscBLASInt)v->map->n));CHKERRCUDA(err);
veccuda->GPUarray = veccuda->GPUarray_allocated;
err = hipStreamCreate(&stream);CHKERRCUDA(err);
veccuda->stream = stream;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
v->ops->destroy = VecDestroy_SeqCUDA;
if (v->valid_GPU_array == PETSC_CUDA_UNALLOCATED) {
if (v->data && ((Vec_Seq*)v->data)->array) {
v->valid_GPU_array = PETSC_CUDA_CPU;
} else {
v->valid_GPU_array = PETSC_CUDA_GPU;
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPU"
/* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */
PetscErrorCode VecCUDACopyToGPU(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = hipMemcpy(varray,((Vec_Seq*)v->data)->array,v->map->n*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPUSome"
PetscErrorCode VecCUDACopyToGPUSome(Vec v, PetscCUDAIndices ci)
{
PetscScalar *varray;
PetscErrorCode ierr;
hipError_t err;
PetscScalar *cpuPtr, *gpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
s = (Vec_Seq*)v->data;
ierr = PetscLogEventBegin(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
varray = ((Vec_CUDA*)v->spptr)->GPUarray;
gpuPtr = varray + ptop_scatter->recvLowestIndex;
cpuPtr = s->array + ptop_scatter->recvLowestIndex;
/* Note : this code copies the smallest contiguous chunk of data
containing ALL of the indices */
err = hipMemcpy(gpuPtr,cpuPtr,ptop_scatter->nr*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
// Set the buffer states
v->valid_GPU_array = PETSC_CUDA_BOTH;
ierr = PetscLogEventEnd(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPU"
/*
VecCUDACopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU
*/
PetscErrorCode VecCUDACopyFromGPU(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = hipMemcpy(((Vec_Seq*)v->data)->array,varray,v->map->n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPUSome"
/* Note that this function only copies *some* of the values up from the GPU to CPU,
which means that we need to recombine the data at some point before using any of the standard functions.
We could add another few flag-types to keep track of this, or treat things like VecGetArray VecRestoreArray
where you have to always call in pairs
*/
PetscErrorCode VecCUDACopyFromGPUSome(Vec v, PetscCUDAIndices ci)
{
const PetscScalar *varray, *gpuPtr;
PetscErrorCode ierr;
hipError_t err;
PetscScalar *cpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
varray=((Vec_CUDA*)v->spptr)->GPUarray;
s = (Vec_Seq*)v->data;
gpuPtr = varray + ptop_scatter->sendLowestIndex;
cpuPtr = s->array + ptop_scatter->sendLowestIndex;
/* Note : this code copies the smallest contiguous chunk of data
containing ALL of the indices */
err = hipMemcpy(cpuPtr,gpuPtr,ptop_scatter->ns*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(v,&varray);CHKERRQ(ierr);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECSEQCUDA - VECSEQCUDA = "seqcuda" - The basic sequential vector, modified to use CUDA
Options Database Keys:
. -vec_type seqcuda - sets the vector type to VECSEQCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq()
M*/
#undef __FUNCT__
#define __FUNCT__ "VecAYPX_SeqCUDA"
PetscErrorCode VecAYPX_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
PetscScalar sone=1.0;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = hipMemcpy(yarray,xarray,bn*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
} else if (alpha == (PetscScalar)1.0) {
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
} else {
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&sone,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPY_SeqCUDA"
PetscErrorCode VecAXPY_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
if (alpha != (PetscScalar)0.0) {
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseDivide_SeqCUDA"
PetscErrorCode VecPointwiseDivide_SeqCUDA(Vec win, Vec xin, Vec yin)
{
PetscInt n = xin->map->n;
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::divides<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = PetscLogFlops(n);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecWAXPY_SeqCUDA"
PetscErrorCode VecWAXPY_SeqCUDA(Vec win,PetscScalar alpha,Vec xin, Vec yin)
{
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(win->map->n,&bn);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
ierr = VecCopy_SeqCUDA(yin,win);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
err = hipMemcpy(warray,yarray,win->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,warray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecMAXPY_SeqCUDA"
PetscErrorCode VecMAXPY_SeqCUDA(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n,j,j_rem;
PetscScalar alpha0,alpha1,alpha2,alpha3;
PetscFunctionBegin;
ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr);
switch (j_rem=nv&0x3) {
case 3:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha += 3;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
y += 3;
break;
case 2:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha +=2;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
y +=2;
break;
case 1:
alpha0 = *alpha++;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
y +=1;
break;
}
for (j=j_rem; j<nv; j+=4) {
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha3 = alpha[3];
alpha += 4;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha3,y[3]);CHKERRQ(ierr);
y += 4;
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDot_SeqCUDA"
PetscErrorCode VecDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
/* arguments y, x are reversed because BLAS complex conjugates the first argument, PETSc the second */
cberr = cublasXdot(cublasv2handle,bn,yarray,one,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n >0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
//
// CUDA kernels for MDot to follow
//
// set work group size to be a power of 2 (128 is usually a good compromise between portability and speed)
#define MDOT_WORKGROUP_SIZE 128
#define MDOT_WORKGROUP_NUM 128
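// Result layout (illustrative note, not upstream documentation): each kernel below writes one
// partial sum per thread block and per y-vector into group_results, i.e. for M dot products
//
//   group_results[j*MDOT_WORKGROUP_NUM + b] = sum over block b of x[i]*y_j[i],   0 <= j < M,
//
// and the host loop in VecMDot_SeqCUDA finishes the reduction by summing the
// MDOT_WORKGROUP_NUM entries belonging to each j.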
#if !defined(PETSC_USE_COMPLEX)
// M = 2:
__global__ void VecMDot_SeqCUDA_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE];
}
}
// M = 3:
__global__ void VecMDot_SeqCUDA_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
}
}
// M = 4:
__global__ void VecMDot_SeqCUDA_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
}
}
// M = 8:
__global__ void VecMDot_SeqCUDA_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
PetscScalar group_sum4 = 0;
PetscScalar group_sum5 = 0;
PetscScalar group_sum6 = 0;
PetscScalar group_sum7 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
group_sum4 += entry_x * y4[i];
group_sum5 += entry_x * y5[i];
group_sum6 += entry_x * y6[i];
group_sum7 += entry_x * y7[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4;
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5;
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6;
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE];
}
}
#endif /* !defined(PETSC_USE_COMPLEX) */
#undef __FUNCT__
#define __FUNCT__ "VecMDot_SeqCUDA"
PetscErrorCode VecMDot_SeqCUDA(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z)
{
PetscErrorCode ierr;
PetscInt i,n = xin->map->n,current_y_index = 0;
const PetscScalar *xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr;
PetscScalar *group_results_gpu;
#if !defined(PETSC_USE_COMPLEX)
PetscInt j;
PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel
#endif
hipError_t cuda_ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUDA not positive.");
/* Handle the case of local size zero first */
if (!xin->map->n) {
for (i=0; i<nv; ++i) z[i] = 0;
PetscFunctionReturn(0);
}
// allocate scratchpad memory for the results of individual work groups:
cuda_ierr = hipMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8);CHKERRCUDA(cuda_ierr);
ierr = VecCUDAGetArrayRead(xin,&xptr);CHKERRQ(ierr);
while (current_y_index < nv)
{
switch (nv - current_y_index) {
case 7:
case 6:
case 5:
case 4:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel4), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu);
      // copy the group results back to the host
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<4; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
current_y_index += 4;
break;
case 3:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel3), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu);
      // copy the group results back to the host
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<3; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
current_y_index += 3;
break;
case 2:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel2), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,n,group_results_gpu);
      // copy the group results back to the host
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<2; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
current_y_index += 2;
break;
case 1:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
current_y_index += 1;
break;
default: // 8 or more vectors left
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y4ptr,one,xptr,one,&z[current_y_index+4]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y5ptr,one,xptr,one,&z[current_y_index+5]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y6ptr,one,xptr,one,&z[current_y_index+6]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y7ptr,one,xptr,one,&z[current_y_index+7]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel8), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu);
      // copy the group results back to the host
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<8; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
current_y_index += 8;
break;
}
}
ierr = VecCUDARestoreArrayRead(xin,&xptr);CHKERRQ(ierr);
cuda_ierr = hipFree(group_results_gpu);CHKERRCUDA(cuda_ierr);
ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef MDOT_WORKGROUP_SIZE
#undef MDOT_WORKGROUP_NUM
#undef __FUNCT__
#define __FUNCT__ "VecSet_SeqCUDA"
PetscErrorCode VecSet_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscInt n = xin->map->n;
PetscScalar *xarray=NULL;
thrust::device_ptr<PetscScalar> xptr;
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(xin,&xarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = hipMemset(xarray,0,n*sizeof(PetscScalar));CHKERRCUDA(err);
} else {
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::fill(xptr,xptr+n,alpha);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
  ierr = VecCUDARestoreArrayWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecScale_SeqCUDA"
PetscErrorCode VecScale_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscScalar *xarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
if (alpha == (PetscScalar)0.0) {
ierr = VecSet_SeqCUDA(xin,alpha);CHKERRQ(ierr);
} else if (alpha != (PetscScalar)1.0) {
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&alpha,xarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecTDot_SeqCUDA"
PetscErrorCode VecTDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXdotu(cublasv2handle,bn,xarray,one,yarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n > 0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCopy_SeqCUDA"
PetscErrorCode VecCopy_SeqCUDA(Vec xin,Vec yin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (xin != yin) {
if (xin->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU if we are on the CPU*/
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_BOTH) {
/* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */
if (yin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU */
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_GPU) {
/* copy in GPU */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_BOTH) {
      /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck);
         default to copying on the GPU (this is an arbitrary choice) */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecSwap_SeqCUDA"
PetscErrorCode VecSwap_SeqCUDA(Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscBLASInt one = 1,bn;
PetscScalar *xarray,*yarray;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (xin != yin) {
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXswap(cublasv2handle,bn,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBY_SeqCUDA"
PetscErrorCode VecAXPBY_SeqCUDA(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin)
{
PetscErrorCode ierr;
PetscScalar a = alpha,b = beta;
const PetscScalar *xarray;
PetscScalar *yarray;
PetscBLASInt one = 1, bn;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
if (a == (PetscScalar)0.0) {
ierr = VecScale_SeqCUDA(yin,beta);CHKERRQ(ierr);
} else if (b == (PetscScalar)1.0) {
ierr = VecAXPY_SeqCUDA(yin,alpha,xin);CHKERRQ(ierr);
} else if (a == (PetscScalar)1.0) {
ierr = VecAYPX_SeqCUDA(yin,beta,xin);CHKERRQ(ierr);
} else if (b == (PetscScalar)0.0) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&beta,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBYPCZ_SeqCUDA"
PetscErrorCode VecAXPBYPCZ_SeqCUDA(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscInt n = zin->map->n;
PetscFunctionBegin;
if (gamma == (PetscScalar)1.0) {
/* z = ax + b*y + z */
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
} else {
/* z = a*x + b*y + c*z */
ierr = VecScale_SeqCUDA(zin,gamma);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseMult_SeqCUDA"
PetscErrorCode VecPointwiseMult_SeqCUDA(Vec win,Vec xin,Vec yin)
{
PetscInt n = win->map->n;
const PetscScalar *xarray,*yarray;
PetscScalar *warray;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::multiplies<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = PetscLogFlops(n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* should do infinity norm in cuda */
#undef __FUNCT__
#define __FUNCT__ "VecNorm_SeqCUDA"
PetscErrorCode VecNorm_SeqCUDA(Vec xin,NormType type,PetscReal *z)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n;
PetscBLASInt one = 1, bn;
const PetscScalar *xarray;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr);
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXnrm2(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
PetscInt i;
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasIXamax(cublasv2handle,bn,xarray,one,&i);CHKERRCUBLAS(cberr);
err = hipMemcpy(z,xarray+i,sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
} else if (type == NORM_1) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXasum(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
ierr = VecNorm_SeqCUDA(xin,NORM_1,z);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,z+1);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDotNorm2_SeqCUDA"
PetscErrorCode VecDotNorm2_SeqCUDA(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm)
{
PetscErrorCode ierr;
PetscReal n=s->map->n;
const PetscScalar *sarray,*tarray;
PetscFunctionBegin;
ierr = VecCUDAGetArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(s,t,dp);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(t,t,nm);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDestroy_SeqCUDA"
PetscErrorCode VecDestroy_SeqCUDA(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
}
if (((Vec_CUDA*)v->spptr)->stream) {
err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_SeqCUDA_Private(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#if defined(PETSC_USE_COMPLEX)
struct conjugate
{
__host__ __device__
PetscScalar operator()(PetscScalar x)
{
return PetscConj(x);
}
};
#endif
#undef __FUNCT__
#define __FUNCT__ "VecConjugate_SeqCUDA"
PetscErrorCode VecConjugate_SeqCUDA(Vec xin)
{
PetscScalar *xarray;
PetscErrorCode ierr;
#if defined(PETSC_USE_COMPLEX)
PetscInt n = xin->map->n;
thrust::device_ptr<PetscScalar> xptr;
#endif
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::transform(xptr,xptr+n,xptr,conjugate());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
#endif
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecGetLocalVector_SeqCUDA"
PetscErrorCode VecGetLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
hipError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (w->data) {
if (((Vec_Seq*)w->data)->array_allocated) {
ierr = PetscFree(((Vec_Seq*)w->data)->array_allocated);CHKERRQ(ierr);
}
((Vec_Seq*)w->data)->array = NULL;
((Vec_Seq*)w->data)->unplacedarray = NULL;
}
if (w->spptr) {
if (((Vec_CUDA*)w->spptr)->GPUarray) {
err = hipFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
}
err = hipStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
if (v->petscnative) {
ierr = PetscFree(w->data);CHKERRQ(ierr);
w->data = v->data;
w->valid_GPU_array = v->valid_GPU_array;
w->spptr = v->spptr;
ierr = PetscObjectStateIncrease((PetscObject)w);CHKERRQ(ierr);
} else {
ierr = VecGetArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
w->valid_GPU_array = PETSC_CUDA_CPU;
ierr = VecCUDAAllocateCheck(w);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecRestoreLocalVector_SeqCUDA"
PetscErrorCode VecRestoreLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
hipError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (v->petscnative) {
v->data = w->data;
v->valid_GPU_array = w->valid_GPU_array;
v->spptr = w->spptr;
ierr = VecCUDACopyFromGPU(v);CHKERRQ(ierr);
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
w->data = 0;
w->valid_GPU_array = PETSC_CUDA_UNALLOCATED;
w->spptr = 0;
} else {
ierr = VecRestoreArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
if ((Vec_CUDA*)w->spptr) {
err = hipFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
err = hipStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayReadWrite"
/*@C
VecCUDAGetArrayReadWrite - Provides access to the CUDA buffer inside a vector.
This function has semantics similar to VecGetArray(): the pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayReadWrite() assumes that
the user will modify the vector data. This is similar to
   intent(inout) in Fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayReadWrite(). Upon restoring the vector data
the data on the host will be marked as out of date. A subsequent
access of the host data will thus incur a data transfer from the
device to the host.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA device pointer
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayReadWrite"
/*@C
VecCUDARestoreArrayReadWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayReadWrite().
This marks the host data as out of date. Subsequent access to the
   vector data on the host side with, for instance, VecGetArray() incurs a
data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayReadWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
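/*
   Usage sketch (illustrative comment only, not part of the PETSc sources): typical read/write
   access to the device buffer of a VECSEQCUDA vector.  The kernel myScaleKernel and its launch
   configuration are hypothetical placeholders for user code.

     PetscScalar *d_x;
     PetscInt    n;
     ierr = VecGetLocalSize(x,&n);CHKERRQ(ierr);
     ierr = VecCUDAGetArrayReadWrite(x,&d_x);CHKERRQ(ierr);      // device data made current first
     hipLaunchKernelGGL(myScaleKernel, dim3((n+127)/128), dim3(128), 0, 0, d_x, n);
     ierr = VecCUDARestoreArrayReadWrite(x,&d_x);CHKERRQ(ierr);  // host copy now marked out of date
*/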
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayRead"
/*@C
VecCUDAGetArrayRead - Provides read access to the CUDA buffer inside a vector.
This function is analogous to VecGetArrayRead(): The pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayRead() assumes that the
   user will not modify the vector data. This is analogous to
intent(in) in Fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayRead(). If the data on the host side was
previously up to date it will remain so, i.e. data on both the device
and the host is up to date. Accessing data on the host side does not
incur a device to host data transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayRead(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayRead(Vec v, const PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayRead"
/*@C
VecCUDARestoreArrayRead - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayRead().
If the data on the host side was previously up to date it will remain
so, i.e. data on both the device and the host is up to date.
Accessing data on the host side e.g. with VecGetArray() does not
incur a device to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayRead() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayRead(Vec v, const PetscScalar **a)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
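/*
   Usage sketch (illustrative comment only, not part of the PETSc sources): read-only access,
   e.g. to hand the device pointer of a VECSEQCUDA vector to a device-side consumer.  The
   function myDeviceConsumer is a hypothetical placeholder and must not write through d_x.

     const PetscScalar *d_x;
     ierr = VecCUDAGetArrayRead(x,&d_x);CHKERRQ(ierr);      // no transfer if device data is current
     myDeviceConsumer(d_x,x->map->n);                       // hypothetical read-only use
     ierr = VecCUDARestoreArrayRead(x,&d_x);CHKERRQ(ierr);  // host copy (if up to date) stays valid
*/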
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayWrite"
/*@C
VecCUDAGetArrayWrite - Provides write access to the CUDA buffer inside a vector.
The data pointed to by the device pointer is uninitialized. The user
may not read from this data. Furthermore, the entire array needs to
be filled by the user to obtain well-defined behaviour. The device
memory will be allocated by this function if it hasn't been allocated
previously. This is analogous to intent(out) in Fortran.
The device pointer needs to be released with
VecCUDARestoreArrayWrite(). When the pointer is released the
   host data of the vector is marked as out of date. Subsequent access
of the host data with e.g. VecGetArray() incurs a device to host data
transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer
Fortran note:
This function is not currently available from Fortran.
Level: advanced
.seealso: VecCUDARestoreArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayWrite"
/*@C
VecCUDARestoreArrayWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayWrite().
Data on the host will be marked as out of date. Subsequent access of
the data on the host side e.g. with VecGetArray() will incur a device
to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
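/*
   Usage sketch (illustrative comment only, not part of the PETSc sources): write-only access,
   useful when every entry is about to be overwritten on the device, so a host-to-device copy
   would be wasted.  The kernel myFillKernel is a hypothetical placeholder.

     PetscScalar *d_x;
     ierr = VecCUDAGetArrayWrite(x,&d_x);CHKERRQ(ierr);      // allocates if needed, no transfer
     hipLaunchKernelGGL(myFillKernel, dim3(128), dim3(128), 0, 0, d_x, x->map->n);
     ierr = VecCUDARestoreArrayWrite(x,&d_x);CHKERRQ(ierr);  // host copy marked out of date
*/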
#undef __FUNCT__
#define __FUNCT__ "VecCUDAPlaceArray"
/*@C
VecCUDAPlaceArray - Allows one to replace the GPU array in a vector with a
GPU array provided by the user. This is useful to avoid copying an
array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
You can return to the original GPU array with a call to VecCUDAResetArray()
It is not possible to use VecCUDAPlaceArray() and VecPlaceArray() at the
same time on the same vector.
Level: developer
.seealso: VecPlaceArray(), VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAPlaceArray(Vec vin,PetscScalar *a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
if (((Vec_Seq*)vin->data)->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecCUDAPlaceArray()/VecPlaceArray() was already called on this vector, without a call to VecCUDAResetArray()/VecResetArray()");
((Vec_Seq*)vin->data)->unplacedarray = (PetscScalar *) ((Vec_CUDA*)vin->spptr)->GPUarray; /* save previous GPU array so reset can bring it back */
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAReplaceArray"
/*@C
VecCUDAReplaceArray - Allows one to replace the GPU array in a vector
with a GPU array provided by the user. This is useful to avoid copying
a GPU array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
This permanently replaces the GPU array and frees the memory associated
with the old GPU array.
The memory passed in CANNOT be freed by the user. It will be freed
when the vector is destroyed.
Not supported from Fortran
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecPlaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAPlaceArray(), VecReplaceArray()
@*/
PetscErrorCode VecCUDAReplaceArray(Vec vin,PetscScalar *a)
{
hipError_t err;
PetscFunctionBegin;
err = hipFree(((Vec_CUDA*)vin->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAResetArray"
/*@C
VecCUDAResetArray - Resets a vector to use its default memory. Call this
after the use of VecCUDAPlaceArray().
Not Collective
Input Parameters:
. vec - the vector
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecPlaceArray(), VecResetArray(), VecCUDAPlaceArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAResetArray(Vec vin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
((Vec_CUDA*)vin->spptr)->GPUarray = (PetscScalar *) ((Vec_Seq*)vin->data)->unplacedarray;
((Vec_Seq*)vin->data)->unplacedarray = 0;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
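/*
   Usage sketch (illustrative comment only, not part of the PETSc sources): temporarily viewing
   a user-managed device buffer as the data of a VECSEQCUDA vector and then restoring the
   original array.  d_work stands for a hypothetical device allocation of at least the local
   vector length.

     ierr = VecCUDAPlaceArray(x,d_work);CHKERRQ(ierr);   // x now uses d_work as its GPU array
     ierr = VecScale(x,2.0);CHKERRQ(ierr);               // operates directly on d_work
     ierr = VecCUDAResetArray(x);CHKERRQ(ierr);          // the saved GPU array is put back
*/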
| 16b0955ef9bf632bca234c509d0a062c17f932ee.cu | /*
Implements the sequential cuda vectors.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <petsc/private/vecimpl.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#undef __FUNCT__
#define __FUNCT__ "VecCUDAAllocateCheck"
/*
Allocates space for the vector array on the GPU if it does not exist.
Does NOT change the PetscCUDAFlag for the vector
Does NOT zero the CUDA array
*/
PetscErrorCode VecCUDAAllocateCheck(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
cudaStream_t stream;
Vec_CUDA *veccuda;
PetscFunctionBegin;
if (!v->spptr) {
ierr = PetscMalloc(sizeof(Vec_CUDA),&v->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)v->spptr;
err = cudaMalloc((void**)&veccuda->GPUarray_allocated,sizeof(PetscScalar)*((PetscBLASInt)v->map->n));CHKERRCUDA(err);
veccuda->GPUarray = veccuda->GPUarray_allocated;
err = cudaStreamCreate(&stream);CHKERRCUDA(err);
veccuda->stream = stream;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
v->ops->destroy = VecDestroy_SeqCUDA;
if (v->valid_GPU_array == PETSC_CUDA_UNALLOCATED) {
if (v->data && ((Vec_Seq*)v->data)->array) {
v->valid_GPU_array = PETSC_CUDA_CPU;
} else {
v->valid_GPU_array = PETSC_CUDA_GPU;
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPU"
/* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */
PetscErrorCode VecCUDACopyToGPU(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = cudaMemcpy(varray,((Vec_Seq*)v->data)->array,v->map->n*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPUSome"
PetscErrorCode VecCUDACopyToGPUSome(Vec v, PetscCUDAIndices ci)
{
PetscScalar *varray;
PetscErrorCode ierr;
cudaError_t err;
PetscScalar *cpuPtr, *gpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
s = (Vec_Seq*)v->data;
ierr = PetscLogEventBegin(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
varray = ((Vec_CUDA*)v->spptr)->GPUarray;
gpuPtr = varray + ptop_scatter->recvLowestIndex;
cpuPtr = s->array + ptop_scatter->recvLowestIndex;
/* Note : this code copies the smallest contiguous chunk of data
containing ALL of the indices */
err = cudaMemcpy(gpuPtr,cpuPtr,ptop_scatter->nr*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
// Set the buffer states
v->valid_GPU_array = PETSC_CUDA_BOTH;
ierr = PetscLogEventEnd(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPU"
/*
VecCUDACopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU
*/
PetscErrorCode VecCUDACopyFromGPU(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = cudaMemcpy(((Vec_Seq*)v->data)->array,varray,v->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPUSome"
/* Note that this function only copies *some* of the values up from the GPU to CPU,
   which means that we need to recombine the data at some point before using any of the standard functions.
   We could add another few flag-types to keep track of this, or treat things like VecGetArray/VecRestoreArray,
   which always have to be called in pairs
*/
PetscErrorCode VecCUDACopyFromGPUSome(Vec v, PetscCUDAIndices ci)
{
const PetscScalar *varray, *gpuPtr;
PetscErrorCode ierr;
cudaError_t err;
PetscScalar *cpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
varray=((Vec_CUDA*)v->spptr)->GPUarray;
s = (Vec_Seq*)v->data;
gpuPtr = varray + ptop_scatter->sendLowestIndex;
cpuPtr = s->array + ptop_scatter->sendLowestIndex;
/* Note : this code copies the smallest contiguous chunk of data
containing ALL of the indices */
err = cudaMemcpy(cpuPtr,gpuPtr,ptop_scatter->ns*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(v,&varray);CHKERRQ(ierr);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECSEQCUDA - VECSEQCUDA = "seqcuda" - The basic sequential vector, modified to use CUDA
Options Database Keys:
. -vec_type seqcuda - sets the vector type to VECSEQCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq()
M*/
#undef __FUNCT__
#define __FUNCT__ "VecAYPX_SeqCUDA"
PetscErrorCode VecAYPX_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
PetscScalar sone=1.0;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = cudaMemcpy(yarray,xarray,bn*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
} else if (alpha == (PetscScalar)1.0) {
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
} else {
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&sone,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPY_SeqCUDA"
PetscErrorCode VecAXPY_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
if (alpha != (PetscScalar)0.0) {
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseDivide_SeqCUDA"
PetscErrorCode VecPointwiseDivide_SeqCUDA(Vec win, Vec xin, Vec yin)
{
PetscInt n = xin->map->n;
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::divides<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = PetscLogFlops(n);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecWAXPY_SeqCUDA"
PetscErrorCode VecWAXPY_SeqCUDA(Vec win,PetscScalar alpha,Vec xin, Vec yin)
{
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(win->map->n,&bn);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
ierr = VecCopy_SeqCUDA(yin,win);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
err = cudaMemcpy(warray,yarray,win->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,warray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecMAXPY_SeqCUDA"
PetscErrorCode VecMAXPY_SeqCUDA(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n,j,j_rem;
PetscScalar alpha0,alpha1,alpha2,alpha3;
PetscFunctionBegin;
ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr);
switch (j_rem=nv&0x3) {
case 3:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha += 3;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
y += 3;
break;
case 2:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha +=2;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
y +=2;
break;
case 1:
alpha0 = *alpha++;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
y +=1;
break;
}
for (j=j_rem; j<nv; j+=4) {
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha3 = alpha[3];
alpha += 4;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha3,y[3]);CHKERRQ(ierr);
y += 4;
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDot_SeqCUDA"
PetscErrorCode VecDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
/* arguments y, x are reversed because BLAS complex conjugates the first argument, PETSc the second */
cberr = cublasXdot(cublasv2handle,bn,yarray,one,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n >0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
//
// CUDA kernels for MDot to follow
//
// set work group size to be a power of 2 (128 is usually a good compromise between portability and speed)
#define MDOT_WORKGROUP_SIZE 128
#define MDOT_WORKGROUP_NUM 128
#if !defined(PETSC_USE_COMPLEX)
// M = 2:
__global__ void VecMDot_SeqCUDA_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE];
}
}
// M = 3:
__global__ void VecMDot_SeqCUDA_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
}
}
// M = 4:
__global__ void VecMDot_SeqCUDA_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
}
}
// M = 8:
__global__ void VecMDot_SeqCUDA_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
PetscScalar group_sum4 = 0;
PetscScalar group_sum5 = 0;
PetscScalar group_sum6 = 0;
PetscScalar group_sum7 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
group_sum4 += entry_x * y4[i];
group_sum5 += entry_x * y5[i];
group_sum6 += entry_x * y6[i];
group_sum7 += entry_x * y7[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4;
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5;
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6;
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE];
}
}
#endif /* !defined(PETSC_USE_COMPLEX) */
#undef __FUNCT__
#define __FUNCT__ "VecMDot_SeqCUDA"
PetscErrorCode VecMDot_SeqCUDA(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z)
{
PetscErrorCode ierr;
PetscInt i,n = xin->map->n,current_y_index = 0;
const PetscScalar *xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr;
PetscScalar *group_results_gpu;
#if !defined(PETSC_USE_COMPLEX)
PetscInt j;
PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel
#endif
cudaError_t cuda_ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUDA not positive.");
/* Handle the case of local size zero first */
if (!xin->map->n) {
for (i=0; i<nv; ++i) z[i] = 0;
PetscFunctionReturn(0);
}
// allocate scratchpad memory for the results of individual work groups:
cuda_ierr = cudaMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8);CHKERRCUDA(cuda_ierr);
ierr = VecCUDAGetArrayRead(xin,&xptr);CHKERRQ(ierr);
while (current_y_index < nv)
{
switch (nv - current_y_index) {
case 7:
case 6:
case 5:
case 4:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel4<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu);
      // copy results back to the host
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<4; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
current_y_index += 4;
break;
case 3:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel3<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu);
      // copy results back to the host
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<3; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
current_y_index += 3;
break;
case 2:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel2<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,n,group_results_gpu);
      // copy results back to the host
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<2; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
current_y_index += 2;
break;
case 1:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
current_y_index += 1;
break;
default: // 8 or more vectors left
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y4ptr,one,xptr,one,&z[current_y_index+4]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y5ptr,one,xptr,one,&z[current_y_index+5]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y6ptr,one,xptr,one,&z[current_y_index+6]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y7ptr,one,xptr,one,&z[current_y_index+7]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel8<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu);
      // copy results back to the host
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<8; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
current_y_index += 8;
break;
}
}
ierr = VecCUDARestoreArrayRead(xin,&xptr);CHKERRQ(ierr);
cuda_ierr = cudaFree(group_results_gpu);CHKERRCUDA(cuda_ierr);
ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef MDOT_WORKGROUP_SIZE
#undef MDOT_WORKGROUP_NUM
#undef __FUNCT__
#define __FUNCT__ "VecSet_SeqCUDA"
PetscErrorCode VecSet_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscInt n = xin->map->n;
PetscScalar *xarray=NULL;
thrust::device_ptr<PetscScalar> xptr;
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(xin,&xarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = cudaMemset(xarray,0,n*sizeof(PetscScalar));CHKERRCUDA(err);
} else {
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::fill(xptr,xptr+n,alpha);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
  ierr = VecCUDARestoreArrayWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecScale_SeqCUDA"
PetscErrorCode VecScale_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscScalar *xarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
if (alpha == (PetscScalar)0.0) {
ierr = VecSet_SeqCUDA(xin,alpha);CHKERRQ(ierr);
} else if (alpha != (PetscScalar)1.0) {
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&alpha,xarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecTDot_SeqCUDA"
PetscErrorCode VecTDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXdotu(cublasv2handle,bn,xarray,one,yarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n > 0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCopy_SeqCUDA"
PetscErrorCode VecCopy_SeqCUDA(Vec xin,Vec yin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (xin != yin) {
if (xin->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU if we are on the CPU*/
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_BOTH) {
/* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */
if (yin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU */
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_GPU) {
/* copy in GPU */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_BOTH) {
      /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck);
         default to copying on the GPU (this is an arbitrary choice) */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecSwap_SeqCUDA"
PetscErrorCode VecSwap_SeqCUDA(Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscBLASInt one = 1,bn;
PetscScalar *xarray,*yarray;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (xin != yin) {
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXswap(cublasv2handle,bn,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBY_SeqCUDA"
PetscErrorCode VecAXPBY_SeqCUDA(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin)
{
PetscErrorCode ierr;
PetscScalar a = alpha,b = beta;
const PetscScalar *xarray;
PetscScalar *yarray;
PetscBLASInt one = 1, bn;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
if (a == (PetscScalar)0.0) {
ierr = VecScale_SeqCUDA(yin,beta);CHKERRQ(ierr);
} else if (b == (PetscScalar)1.0) {
ierr = VecAXPY_SeqCUDA(yin,alpha,xin);CHKERRQ(ierr);
} else if (a == (PetscScalar)1.0) {
ierr = VecAYPX_SeqCUDA(yin,beta,xin);CHKERRQ(ierr);
} else if (b == (PetscScalar)0.0) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&beta,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBYPCZ_SeqCUDA"
PetscErrorCode VecAXPBYPCZ_SeqCUDA(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscInt n = zin->map->n;
PetscFunctionBegin;
if (gamma == (PetscScalar)1.0) {
/* z = ax + b*y + z */
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
} else {
/* z = a*x + b*y + c*z */
ierr = VecScale_SeqCUDA(zin,gamma);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseMult_SeqCUDA"
PetscErrorCode VecPointwiseMult_SeqCUDA(Vec win,Vec xin,Vec yin)
{
PetscInt n = win->map->n;
const PetscScalar *xarray,*yarray;
PetscScalar *warray;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::multiplies<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = PetscLogFlops(n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* should do infinity norm in cuda */
#undef __FUNCT__
#define __FUNCT__ "VecNorm_SeqCUDA"
PetscErrorCode VecNorm_SeqCUDA(Vec xin,NormType type,PetscReal *z)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n;
PetscBLASInt one = 1, bn;
const PetscScalar *xarray;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr);
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXnrm2(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
PetscInt i;
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
    cberr = cublasIXamax(cublasv2handle,bn,xarray,one,&i);CHKERRCUBLAS(cberr);
    err = cudaMemcpy(z,xarray+i-1,sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err); /* cublasIXamax returns a 1-based index */
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
} else if (type == NORM_1) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXasum(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
ierr = VecNorm_SeqCUDA(xin,NORM_1,z);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,z+1);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDotNorm2_SeqCUDA"
PetscErrorCode VecDotNorm2_SeqCUDA(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm)
{
PetscErrorCode ierr;
PetscReal n=s->map->n;
const PetscScalar *sarray,*tarray;
PetscFunctionBegin;
ierr = VecCUDAGetArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(s,t,dp);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(t,t,nm);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDestroy_SeqCUDA"
PetscErrorCode VecDestroy_SeqCUDA(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
}
if (((Vec_CUDA*)v->spptr)->stream) {
err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_SeqCUDA_Private(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#if defined(PETSC_USE_COMPLEX)
struct conjugate
{
__host__ __device__
PetscScalar operator()(PetscScalar x)
{
return PetscConj(x);
}
};
#endif
#undef __FUNCT__
#define __FUNCT__ "VecConjugate_SeqCUDA"
PetscErrorCode VecConjugate_SeqCUDA(Vec xin)
{
PetscScalar *xarray;
PetscErrorCode ierr;
#if defined(PETSC_USE_COMPLEX)
PetscInt n = xin->map->n;
thrust::device_ptr<PetscScalar> xptr;
#endif
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::transform(xptr,xptr+n,xptr,conjugate());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
#endif
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecGetLocalVector_SeqCUDA"
PetscErrorCode VecGetLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
cudaError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (w->data) {
if (((Vec_Seq*)w->data)->array_allocated) {
ierr = PetscFree(((Vec_Seq*)w->data)->array_allocated);CHKERRQ(ierr);
}
((Vec_Seq*)w->data)->array = NULL;
((Vec_Seq*)w->data)->unplacedarray = NULL;
}
if (w->spptr) {
if (((Vec_CUDA*)w->spptr)->GPUarray) {
err = cudaFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
}
err = cudaStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
if (v->petscnative) {
ierr = PetscFree(w->data);CHKERRQ(ierr);
w->data = v->data;
w->valid_GPU_array = v->valid_GPU_array;
w->spptr = v->spptr;
ierr = PetscObjectStateIncrease((PetscObject)w);CHKERRQ(ierr);
} else {
ierr = VecGetArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
w->valid_GPU_array = PETSC_CUDA_CPU;
ierr = VecCUDAAllocateCheck(w);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecRestoreLocalVector_SeqCUDA"
PetscErrorCode VecRestoreLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
cudaError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (v->petscnative) {
v->data = w->data;
v->valid_GPU_array = w->valid_GPU_array;
v->spptr = w->spptr;
ierr = VecCUDACopyFromGPU(v);CHKERRQ(ierr);
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
w->data = 0;
w->valid_GPU_array = PETSC_CUDA_UNALLOCATED;
w->spptr = 0;
} else {
ierr = VecRestoreArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
if ((Vec_CUDA*)w->spptr) {
err = cudaFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
err = cudaStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayReadWrite"
/*@C
VecCUDAGetArrayReadWrite - Provides access to the CUDA buffer inside a vector.
This function has semantics similar to VecGetArray(): the pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayReadWrite() assumes that
the user will modify the vector data. This is similar to
intent(inout) in fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayReadWrite(). Upon restoring the vector data
the data on the host will be marked as out of date. A subsequent
access of the host data will thus incur a data transfer from the
device to the host.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA device pointer
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
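/* Example (illustrative sketch, not part of the original source): the read-write
   accessor is typically paired with its restore call around a user kernel launch.

     PetscScalar *d_x;
     ierr = VecCUDAGetArrayReadWrite(v,&d_x);CHKERRQ(ierr);
     // launch a custom kernel that reads and writes d_x here
     ierr = VecCUDARestoreArrayReadWrite(v,&d_x);CHKERRQ(ierr);

   After the restore call the host copy is considered out of date, as described above. */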
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayReadWrite"
/*@C
VecCUDARestoreArrayReadWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayReadWrite().
This marks the host data as out of date. Subsequent access to the
vector data on the host side with for instance VecGetArray() incurs a
data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayReadWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayRead"
/*@C
VecCUDAGetArrayRead - Provides read access to the CUDA buffer inside a vector.
This function is analogous to VecGetArrayRead(): The pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayRead() assumes that the
   user will not modify the vector data. This is analogous to
intent(in) in Fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayRead(). If the data on the host side was
previously up to date it will remain so, i.e. data on both the device
and the host is up to date. Accessing data on the host side does not
incur a device to host data transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayRead(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayRead(Vec v, const PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayRead"
/*@C
VecCUDARestoreArrayRead - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayRead().
If the data on the host side was previously up to date it will remain
so, i.e. data on both the device and the host is up to date.
Accessing data on the host side e.g. with VecGetArray() does not
incur a device to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayRead() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayRead(Vec v, const PetscScalar **a)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayWrite"
/*@C
VecCUDAGetArrayWrite - Provides write access to the CUDA buffer inside a vector.
The data pointed to by the device pointer is uninitialized. The user
may not read from this data. Furthermore, the entire array needs to
be filled by the user to obtain well-defined behaviour. The device
memory will be allocated by this function if it hasn't been allocated
previously. This is analogous to intent(out) in Fortran.
The device pointer needs to be released with
VecCUDARestoreArrayWrite(). When the pointer is released the
   host data of the vector is marked as out of date. Subsequent access
of the host data with e.g. VecGetArray() incurs a device to host data
transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer
Fortran note:
This function is not currently available from Fortran.
Level: advanced
.seealso: VecCUDARestoreArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayWrite"
/*@C
VecCUDARestoreArrayWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayWrite().
Data on the host will be marked as out of date. Subsequent access of
the data on the host side e.g. with VecGetArray() will incur a device
to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAPlaceArray"
/*@C
VecCUDAPlaceArray - Allows one to replace the GPU array in a vector with a
GPU array provided by the user. This is useful to avoid copying an
array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
You can return to the original GPU array with a call to VecCUDAResetArray()
It is not possible to use VecCUDAPlaceArray() and VecPlaceArray() at the
same time on the same vector.
Level: developer
.seealso: VecPlaceArray(), VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAPlaceArray(Vec vin,PetscScalar *a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
if (((Vec_Seq*)vin->data)->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecCUDAPlaceArray()/VecPlaceArray() was already called on this vector, without a call to VecCUDAResetArray()/VecResetArray()");
((Vec_Seq*)vin->data)->unplacedarray = (PetscScalar *) ((Vec_CUDA*)vin->spptr)->GPUarray; /* save previous GPU array so reset can bring it back */
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAReplaceArray"
/*@C
VecCUDAReplaceArray - Allows one to replace the GPU array in a vector
with a GPU array provided by the user. This is useful to avoid copying
a GPU array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
This permanently replaces the GPU array and frees the memory associated
with the old GPU array.
The memory passed in CANNOT be freed by the user. It will be freed
when the vector is destroyed.
Not supported from Fortran
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecPlaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAPlaceArray(), VecReplaceArray()
@*/
PetscErrorCode VecCUDAReplaceArray(Vec vin,PetscScalar *a)
{
cudaError_t err;
PetscFunctionBegin;
err = cudaFree(((Vec_CUDA*)vin->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAResetArray"
/*@C
VecCUDAResetArray - Resets a vector to use its default memory. Call this
after the use of VecCUDAPlaceArray().
Not Collective
Input Parameters:
. vec - the vector
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecPlaceArray(), VecResetArray(), VecCUDAPlaceArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAResetArray(Vec vin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
((Vec_CUDA*)vin->spptr)->GPUarray = (PetscScalar *) ((Vec_Seq*)vin->data)->unplacedarray;
((Vec_Seq*)vin->data)->unplacedarray = 0;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
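/* Example (illustrative sketch, not part of the original source): VecCUDAPlaceArray()
   and VecCUDAResetArray() are intended to be used as a pair.

     PetscScalar *d_user;                     // device buffer owned by the caller
     ierr = VecCUDAPlaceArray(v,d_user);CHKERRQ(ierr);
     // operations on v now read and write d_user directly
     ierr = VecCUDAResetArray(v);CHKERRQ(ierr); // restore the vector's own GPU array

   VecCUDAReplaceArray() differs in that the handed-in buffer becomes owned by the
   vector and the previous GPU array is freed immediately. */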
|
e33d9ae489a7ef24b64ebf8dd277cd92f90a3167.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_common/cuda_prefix_sum.cuh"
#include <chrono>
#include <iostream>
#include "cuda_common/cuda_check.cuh"
using namespace pplanner;
void PrefixSumTime() {
int m = 20;
int n = 256;
int *cpu_array = new int[n * m];
for (int i = 0; i < n * m; ++i)
cpu_array[i] = rand() % 1000;
int *cpu_sum = new int[n * m];
cpu_sum[0] = 0;
auto chrono_start = std::chrono::system_clock::now();
for (int i = 1; i < n * m; ++i)
    cpu_sum[i] = cpu_sum[i - 1] + cpu_array[i - 1];
auto chrono_end = std::chrono::system_clock::now();
auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
chrono_end - chrono_start).count();
std::cout << "cpu: " << ns << " ns" << std::endl;
int *gpu_array = nullptr;
CudaMallocAndCopy((void**)&gpu_array, cpu_array, n * m * sizeof(int));
int *gpu_sum = nullptr;
CUDA_CHECK(hipMalloc((void**)&gpu_sum, n * m * sizeof(int)));
int *gpu_offsets = nullptr;
CUDA_CHECK(hipMalloc((void**)&gpu_offsets, n * sizeof(int)));
int *tmp_sum = new int[n * m];
size_t n_shared = (n + ConflictFreeOffset(n - 1)) * sizeof(int);
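  // Note (added): n_shared reserves one int per element plus ConflictFreeOffset(n - 1)
  // padding entries; the padding is presumably there so the strided accesses of the
  // block-level scan avoid shared-memory bank conflicts (the usual work-efficient
  // scan layout is assumed for the PrefixSum kernel).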
chrono_start = std::chrono::system_clock::now();
hipLaunchKernelGGL(( PrefixSum), dim3(m), dim3(n / 2), n_shared, 0, gpu_array, gpu_sum, n * m);
CUDA_CHECK(hipMemcpy(tmp_sum, gpu_sum, n * m * sizeof(int),
hipMemcpyDeviceToHost));
int small[m];
small[0] = 0;
for (int i = 1; i < m; ++i)
small[i] = tmp_sum[i * n - 1];
  CUDA_CHECK(hipMemcpy(gpu_offsets, small, m * sizeof(int),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( AddBlockPrefixSum), dim3(m), dim3(n), 0, 0, gpu_offsets, gpu_sum, n * m);
CUDA_CHECK(hipMemcpy(tmp_sum, gpu_sum, n * m * sizeof(int),
hipMemcpyDeviceToHost));
chrono_end = std::chrono::system_clock::now();
ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
chrono_end - chrono_start).count();
std::cout << "gpu: " << ns << " ns" << std::endl;
delete[] cpu_array;
delete[] cpu_sum;
delete[] tmp_sum;
CUDA_CHECK(hipFree(gpu_array));
CUDA_CHECK(hipFree(gpu_sum));
}
int main() {
PrefixSumTime();
return 0;
}
| e33d9ae489a7ef24b64ebf8dd277cd92f90a3167.cu | #include "cuda_common/cuda_prefix_sum.cuh"
#include <chrono>
#include <iostream>
#include "cuda_common/cuda_check.cuh"
using namespace pplanner;
void PrefixSumTime() {
int m = 20;
int n = 256;
int *cpu_array = new int[n * m];
for (int i = 0; i < n * m; ++i)
cpu_array[i] = rand() % 1000;
int *cpu_sum = new int[n * m];
cpu_sum[0] = 0;
auto chrono_start = std::chrono::system_clock::now();
for (int i = 1; i < n * m; ++i)
    cpu_sum[i] = cpu_sum[i - 1] + cpu_array[i - 1];
auto chrono_end = std::chrono::system_clock::now();
auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
chrono_end - chrono_start).count();
std::cout << "cpu: " << ns << " ns" << std::endl;
int *gpu_array = nullptr;
CudaMallocAndCopy((void**)&gpu_array, cpu_array, n * m * sizeof(int));
int *gpu_sum = nullptr;
CUDA_CHECK(cudaMalloc((void**)&gpu_sum, n * m * sizeof(int)));
int *gpu_offsets = nullptr;
CUDA_CHECK(cudaMalloc((void**)&gpu_offsets, n * sizeof(int)));
int *tmp_sum = new int[n * m];
size_t n_shared = (n + ConflictFreeOffset(n - 1)) * sizeof(int);
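  // Note (added): n_shared reserves one int per element plus ConflictFreeOffset(n - 1)
  // padding entries; the padding is presumably there so the strided accesses of the
  // block-level scan avoid shared-memory bank conflicts (the usual work-efficient
  // scan layout is assumed for the PrefixSum kernel).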
chrono_start = std::chrono::system_clock::now();
PrefixSum<<<m, n / 2, n_shared>>>(gpu_array, gpu_sum, n * m);
CUDA_CHECK(cudaMemcpy(tmp_sum, gpu_sum, n * m * sizeof(int),
cudaMemcpyDeviceToHost));
int small[m];
small[0] = 0;
for (int i = 1; i < m; ++i)
small[i] = tmp_sum[i * n - 1];
  CUDA_CHECK(cudaMemcpy(gpu_offsets, small, m * sizeof(int),
cudaMemcpyHostToDevice));
AddBlockPrefixSum<<<m, n>>>(gpu_offsets, gpu_sum, n * m);
CUDA_CHECK(cudaMemcpy(tmp_sum, gpu_sum, n * m * sizeof(int),
cudaMemcpyDeviceToHost));
chrono_end = std::chrono::system_clock::now();
ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
chrono_end - chrono_start).count();
std::cout << "gpu: " << ns << " ns" << std::endl;
delete[] cpu_array;
delete[] cpu_sum;
delete[] tmp_sum;
CUDA_CHECK(cudaFree(gpu_array));
CUDA_CHECK(cudaFree(gpu_sum));
}
int main() {
PrefixSumTime();
return 0;
}
|
9d8b3aa2dac9090ced8e992fd6e962223b089346.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void write_to_surface(const float *data, hipSurfaceObject_t surface, const int width, const int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int b = 4 * blockIdx.z;
if (x < width && y < height) {
const int wh = width * height;
const int offset = b * wh + y * width + x;
float4 tmp;
tmp.x = data[0 * wh + offset];
tmp.y = data[1 * wh + offset];
tmp.z = data[2 * wh + offset];
tmp.w = data[3 * wh + offset];
surf2DLayeredwrite<float4>(tmp, surface, x * sizeof(float4), y, blockIdx.z);
}
} | 9d8b3aa2dac9090ced8e992fd6e962223b089346.cu | #include "includes.h"
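// Note (added): this kernel packs four consecutive channel planes of the input buffer
// (channels 4*blockIdx.z .. 4*blockIdx.z+3, stored plane by plane) into one float4 and
// writes it to layer blockIdx.z of the layered 2D surface; the channel count is
// assumed to be a multiple of 4.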
__global__ void write_to_surface(const float *data, cudaSurfaceObject_t surface, const int width, const int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int b = 4 * blockIdx.z;
if (x < width && y < height) {
const int wh = width * height;
const int offset = b * wh + y * width + x;
float4 tmp;
tmp.x = data[0 * wh + offset];
tmp.y = data[1 * wh + offset];
tmp.z = data[2 * wh + offset];
tmp.w = data[3 * wh + offset];
surf2DLayeredwrite<float4>(tmp, surface, x * sizeof(float4), y, blockIdx.z);
}
} |
01d72af2e1ef47f1133476a4006a7641377ceb57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void lyra2Z_gpu_hash_32_2(uint32_t threads, uint32_t startNounce, uint64_t *g_hash) {} | 01d72af2e1ef47f1133476a4006a7641377ceb57.cu | #include "includes.h"
__global__ void lyra2Z_gpu_hash_32_2(uint32_t threads, uint32_t startNounce, uint64_t *g_hash) {} |
3fbe82887d02aa911623c85043d7d714aa70e812.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define N 1000
#define TILE_WIDTH 4
#define blocksize 4
/*
Function name: kernelMultMatTiled
Parameters: d_M, d_N, d_P, m, n, y
d_M: matrix of dimension m*n
d_N: matrix of dimension n*y
d_P: resulting matrix of dimension m*y
m: number of rows of d_M
n: number of columns of d_M and of rows of d_N
y: number of columns of d_N
Purpose: multiply two matrices of different dimensions, exploiting the parallelism
of the GPU through the tiling (TILE) technique
*/
__global__ void kernelMultMatTiled(float *d_M, float *d_N, float *d_P, int m,int n , int y){
  // declare the shared-memory tiles of size TILE_WIDTH x TILE_WIDTH
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
  for(int i = 0; i < (n + TILE_WIDTH - 1) / TILE_WIDTH; i++){
    /* first check that the element lies inside matrix d_M;
    if it does not, the corresponding tile entry is set to zero
    */
if((i*TILE_WIDTH + tx) < n && row < m){
Mds[ty][tx]=d_M[row*n + (i*TILE_WIDTH + tx)];
}else{
Mds[ty][tx]=0.0;
}
    /* then check that the element lies inside matrix d_N;
    if it does not, the corresponding tile entry is set to zero
    */
if((i*TILE_WIDTH + ty) < n && col < y){
Nds[ty][tx]= d_N[(i*TILE_WIDTH + ty)*y + col];
}else{
Nds[ty][tx]=0.0;
}
__syncthreads();
    /* multiply the elements that fall inside the current tile
    and accumulate the result in Pvalue */
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
  //store Pvalue in the corresponding position of d_P
if(row<m && col < y)
d_P[(row*y)+ col] = Pvalue;
}
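/* Note (added): compared with the naive kernelMultMat below, the tiled kernel stages
   TILE_WIDTH x TILE_WIDTH sub-blocks of d_M and d_N in shared memory, so each global
   element is read once per tile instead of once per output element, reducing global
   memory traffic by roughly a factor of TILE_WIDTH. */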
__global__ void kernelMultMat(int *a, int *b, int *c,int m){
int i,add;
int col=blockDim.x*blockIdx.x + threadIdx.x;
int row=blockDim.y*blockIdx.y + threadIdx.y;
if(col<m && row<m) {
add=0;
for(i=0; i< m ;i++){
add += a[i+m*row]*b[col+m*i];
}
c[row*m+col] = add;
}
}
/*
Function name: inicializarMat
Parameters: Ma, m, n
Ma: matrix to be initialized
m: number of rows
n: number of columns
Purpose: initialize a matrix (every entry is set to 1 here)
*/
void inicializarMat(float *Ma , int m, int n){
  for(int i = 0; i < m*n; i++){
Ma[i] = 1;
}
}
/*
Function name: multiplicacionHost
Parameters: h_Ma, h_Mb, h_Mc, m, n, y
h_Ma : data of matrix A
h_Mb : data of matrix B
h_Mc : matrix where the result is stored
m : number of rows of matrix A
n : number of columns of matrix A and number of rows of matrix B
y : number of columns of matrix B
Matrix h_Ma = m*n   Matrix h_Mb = n*y   Matrix h_Mc = m*y
Purpose: multiply two matrices of different dimensions on the CPU (reference version)
*/
void multiplicacionHost(float *h_Ma, float *h_Mb, float *h_Mc, int m, int n, int y){
float p;
  //iterate over the rows of matrix h_Ma
  for(int row = 0; row < m ; row++){
    //iterate over the columns of matrix h_Mb
    for(int col = 0; col < y ; col++){
      p = 0;
      for(int k = 0; k < n; k++){
        //multiply and accumulate the partial result in p
        p += h_Ma[row*n+k] * h_Mb[k*y+col];
      }
      //store the result p in the corresponding position of h_Mc
      h_Mc[row*y+col] = p;
}
}
}
/*
Function name: printData
Parameters: Mat, m, n, tipo
Mat: the matrix values to be printed
m: number of rows
n: number of columns
tipo: selects the header to print (1 = matrix A, 2 = matrix B)
Purpose: print the contents of a matrix
*/
int printData(float *Mat, int m,int n, int tipo){
if(tipo == 1)
printf("================ Matriz A ================ \n");
if(tipo == 2)
printf("================ Matriz B ================ \n");
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%.2f ", Mat[(i*m)+j]);
}
printf("\n");
}
printf("=============================\n\n");
return 0;
}
int main(){
float *h_Ma,*h_Mb,*h_Mc,*d_Ma,*d_Mb,*d_Mc,*h_Mresult;
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
int n,m,y;
  //matrix dimensions: m*n and n*y
  m=1600;
  n=1600;
  y=1500;
  //allocate host memory
  h_Ma= (float*)malloc((m*n)*sizeof(float));
  h_Mb= (float*)malloc((n*y)*sizeof(float));
  h_Mc= (float*)malloc((m*y)*sizeof(float));
  h_Mresult = (float*)malloc((m*y)*sizeof(float));
  // initialize matrices h_Ma, h_Mb
inicializarMat(h_Ma,m,n);
inicializarMat(h_Mb,n,y);
start = clock();
multiplicacionHost(h_Ma,h_Mb,h_Mc,m,n,y);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo CPU: %.10f\n", cpu_time_used);
  //allocate device memory
  hipMalloc((void**)&d_Ma,(m*n)*sizeof(float));
  hipMalloc((void**)&d_Mb,(n*y)*sizeof(float));
  hipMalloc((void**)&d_Mc,(m*y)*sizeof(float));
  //start the GPU timer
  startGPU = clock();
  //copy the matrices from host to device
  hipMemcpy(d_Ma,h_Ma,(m*n)*sizeof(float),hipMemcpyHostToDevice);
  hipMemcpy(d_Mb,h_Mb,(n*y)*sizeof(float),hipMemcpyHostToDevice);
  //set the number of blocks and the number of threads per block
dim3 DimBlock(blocksize, blocksize, 1);
dim3 DimGrid(ceil(y / float(blocksize)), ceil(m / float(blocksize)), 1);
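  //Note (added): ceil() guarantees enough blocks to cover dimensions that are not
  //multiples of blocksize; grid x spans the y columns and grid y spans the m rows,
  //matching the row/col computation inside the kernel (blocksize must equal TILE_WIDTH).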
  //launch the tiled matrix multiplication kernel
  hipLaunchKernelGGL(( kernelMultMatTiled), dim3(DimGrid),dim3(DimBlock), 0, 0, d_Ma,d_Mb,d_Mc,m,n,y);
  hipDeviceSynchronize();
  //copy the resulting matrix from device back to host
  hipMemcpy(h_Mresult,d_Mc,(m*y)*sizeof(float),hipMemcpyDeviceToHost);
  //stop the GPU timer
  endGPU = clock();
  //compute elapsed time
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo GPU : %.10f\n", gpu_time_used);
printf("\n");
return 0;
}
| 3fbe82887d02aa911623c85043d7d714aa70e812.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define N 1000
#define TILE_WIDTH 4
#define blocksize 4
/*
Function name: kernelMultMatTiled
Parameters: d_M, d_N, d_P, m, n, y
d_M: matrix of dimension m*n
d_N: matrix of dimension n*y
d_P: resulting matrix of dimension m*y
m: number of rows of d_M
n: number of columns of d_M and of rows of d_N
y: number of columns of d_N
Purpose: multiply two matrices of different dimensions, exploiting the parallelism
of the GPU through the tiling (TILE) technique
*/
__global__ void kernelMultMatTiled(float *d_M, float *d_N, float *d_P, int m,int n , int y){
  // declare the shared-memory tiles of size TILE_WIDTH x TILE_WIDTH
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
  for(int i = 0; i < (n + TILE_WIDTH - 1) / TILE_WIDTH; i++){
    /* first check that the element lies inside matrix d_M;
    if it does not, the corresponding tile entry is set to zero
    */
if((i*TILE_WIDTH + tx) < n && row < m){
Mds[ty][tx]=d_M[row*n + (i*TILE_WIDTH + tx)];
}else{
Mds[ty][tx]=0.0;
}
    /* then check that the element lies inside matrix d_N;
    if it does not, the corresponding tile entry is set to zero
    */
if((i*TILE_WIDTH + ty) < n && col < y){
Nds[ty][tx]= d_N[(i*TILE_WIDTH + ty)*y + col];
}else{
Nds[ty][tx]=0.0;
}
__syncthreads();
    /* multiply the elements that fall inside the current tile
    and accumulate the result in Pvalue */
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
  //store Pvalue in the corresponding position of d_P
if(row<m && col < y)
d_P[(row*y)+ col] = Pvalue;
}
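/* Note (added): compared with the naive kernelMultMat below, the tiled kernel stages
   TILE_WIDTH x TILE_WIDTH sub-blocks of d_M and d_N in shared memory, so each global
   element is read once per tile instead of once per output element, reducing global
   memory traffic by roughly a factor of TILE_WIDTH. */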
__global__ void kernelMultMat(int *a, int *b, int *c,int m){
int i,add;
int col=blockDim.x*blockIdx.x + threadIdx.x;
int row=blockDim.y*blockIdx.y + threadIdx.y;
if(col<m && row<m) {
add=0;
for(i=0; i< m ;i++){
add += a[i+m*row]*b[col+m*i];
}
c[row*m+col] = add;
}
}
/*
Function name: inicializarMat
Parameters: Ma, m, n
Ma: matrix to be initialized
m: number of rows
n: number of columns
Purpose: initialize a matrix (every entry is set to 1 here)
*/
void inicializarMat(float *Ma , int m, int n){
  for(int i = 0; i < m*n; i++){
Ma[i] = 1;
}
}
/*
Function name: multiplicacionHost
Parameters: h_Ma, h_Mb, h_Mc, m, n, y
h_Ma : data of matrix A
h_Mb : data of matrix B
h_Mc : matrix where the result is stored
m : number of rows of matrix A
n : number of columns of matrix A and number of rows of matrix B
y : number of columns of matrix B
Matrix h_Ma = m*n   Matrix h_Mb = n*y   Matrix h_Mc = m*y
Purpose: multiply two matrices of different dimensions on the CPU (reference version)
*/
void multiplicacionHost(float *h_Ma, float *h_Mb, float *h_Mc, int m, int n, int y){
float p;
  //iterate over the rows of matrix h_Ma
  for(int row = 0; row < m ; row++){
    //iterate over the columns of matrix h_Mb
    for(int col = 0; col < y ; col++){
      p = 0;
      for(int k = 0; k < n; k++){
        //multiply and accumulate the partial result in p
        p += h_Ma[row*n+k] * h_Mb[k*y+col];
      }
      //store the result p in the corresponding position of h_Mc
      h_Mc[row*y+col] = p;
}
}
}
/*
Function name: printData
Parameters: Mat, m, n, tipo
Mat: the matrix values to be printed
m: number of rows
n: number of columns
tipo: selects the header to print (1 = matrix A, 2 = matrix B)
Purpose: print the contents of a matrix
*/
int printData(float *Mat, int m,int n, int tipo){
if(tipo == 1)
printf("================ Matriz A ================ \n");
if(tipo == 2)
printf("================ Matriz B ================ \n");
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%.2f ", Mat[(i*m)+j]);
}
printf("\n");
}
printf("=============================\n\n");
return 0;
}
int main(){
float *h_Ma,*h_Mb,*h_Mc,*d_Ma,*d_Mb,*d_Mc,*h_Mresult;
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
int n,m,y;
  //matrix dimensions: m*n and n*y
  m=1600;
  n=1600;
  y=1500;
  //allocate host memory
  h_Ma= (float*)malloc((m*n)*sizeof(float));
  h_Mb= (float*)malloc((n*y)*sizeof(float));
  h_Mc= (float*)malloc((m*y)*sizeof(float));
  h_Mresult = (float*)malloc((m*y)*sizeof(float));
  // initialize matrices h_Ma, h_Mb
inicializarMat(h_Ma,m,n);
inicializarMat(h_Mb,n,y);
start = clock();
multiplicacionHost(h_Ma,h_Mb,h_Mc,m,n,y);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo CPU: %.10f\n", cpu_time_used);
  //allocate device memory
cudaMalloc((void**)&d_Ma,(m*n)*sizeof(float));
cudaMalloc((void**)&d_Mb,(n*y)*sizeof(float));
cudaMalloc((void**)&d_Mc,(m*y)*sizeof(float));
  //start the timer
startGPU = clock();
  //copy the matrices from host to device
cudaMemcpy(d_Ma,h_Ma,(m*n)*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_Mb,h_Mb,(n*y)*sizeof(float),cudaMemcpyHostToDevice);
  //set the number of blocks and the number of threads per block
dim3 DimBlock(blocksize, blocksize, 1);
dim3 DimGrid(ceil(y / float(blocksize)), ceil(m / float(blocksize)), 1);
  //launch the tiled matrix multiplication kernel
kernelMultMatTiled<<<DimGrid,DimBlock>>>(d_Ma,d_Mb,d_Mc,m,n,y);
cudaDeviceSynchronize();
  //copy the resulting matrix from the device back to the host
cudaMemcpy(h_Mresult,d_Mc,(m*y)*sizeof(float),cudaMemcpyDeviceToHost);
  //stop the timer
endGPU = clock();
  //compute the elapsed time
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo GPU : %.10f\n", gpu_time_used);
printf("\n");
return 0;
}
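/* Illustrative addition, not part of the original program: a minimal sketch of
   how the CPU result h_Mc and the GPU result h_Mresult computed in main could
   be compared element by element. The helper name and the tolerance value are
   assumptions made for this example; main does not call it. */
int verifyResults(const float *cpu, const float *gpu, int rows, int cols){
  const float eps = 1e-3f;          // tolerance for float accumulation error
  for(int i = 0; i < rows * cols; ++i){
    float d = cpu[i] - gpu[i];
    if(d < 0) d = -d;               // manual absolute value, no extra includes
    if(d > eps){
      printf("Mismatch at %d: cpu=%f gpu=%f\n", i, cpu[i], gpu[i]);
      return 1;
    }
  }
  printf("CPU and GPU results match\n");
  return 0;
}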
|
d5c974eefa99f3ed89eb2d2fbeda61505171a7cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/activations/softmax.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#ifdef LBANN_HAS_DNN_LIB
#include "lbann/utils/dnn_lib/softmax.hpp"
#endif // LBANN_HAS_DNN_LIB
namespace lbann {
namespace {
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
/** Functor to ensure values are above threshold value */
template <typename TensorDataType>
struct threshold_op {
__forceinline__ __device__ TensorDataType operator()(const TensorDataType& y) const {
return gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
}
};
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
/** @brief Max functor */
template <class T>
struct max_op {
__device__ __forceinline__
DataType operator()(const T& x1, const T& x2) const {
return gpu_lib::max(x1, x2);
}
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ values,
size_t values_ldim,
TensorDataType* __restrict__ max_values) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Find largest value for each thread
TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& val = values[row+col*values_ldim];
thread_max_val = gpu_lib::max(thread_max_val, val);
}
// Find largest value for each block
const TensorDataType block_max_val
= gpu_lib::block_reduce<bsize,1,1,DataType,max_op<DataType>>(thread_max_val);
if (tid == 0) {
max_values[bidx+col*nblocksx] = block_max_val;
}
}
}
/** @brief Compute exp(x-shift)
*
* Also compute sum(exp(x-shift)) for each matrix column.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_exp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ shifts,
TensorDataType* __restrict__ sums) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
const auto& shift = shifts[col];
// Exponentiate inputs and compute sum for each thread
TensorDataType thread_sum{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& x = input[row+col*input_ldim];
auto& y = output[row+col*output_ldim];
y = gpu_lib::exp(x-shift);
thread_sum += y;
}
// Compute sum for each block
const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum);
if (tid == 0) {
gpu_lib::atomic_add(&sums[col], block_sum);
}
}
}
/** @brief Compute layer output
*
* y = exp(x-shift) / sum(exp(x-shift))
*
* If @c LBANN_ENABLE_SOFTMAX_THRESHOLD is set, small values are
* thresholded to a minimum value to avoid denormalized floats.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
 * @param output On input, contains exp(x-shift). On output,
* contains the layer output.
* @param sums sum(exp(x-shift)) for each column
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
size_t width,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ sums) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& denom = sums[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
auto& y = output[row+col*output_ldim];
y /= denom;
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
y = gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
}
}
}
/** @brief Compute dot(y,dy) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void bp_dot_product_kernel(
size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ dot_products) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Compute dot product contribution for each thread
TensorDataType thread_dot_product{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row+col*output_ldim];
const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
thread_dot_product += y * dy;
}
// Compute dot product contribution for each block
const TensorDataType block_dot_product
= gpu_lib::block_reduce<bsize,1,1>(thread_dot_product);
if (tid == 0) {
gpu_lib::atomic_add(&dot_products[col], block_dot_product);
}
}
}
/** @brief Compute gradient w.r.t. input
*
* dx = y * (dy - dot(y,dy))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param dot_products dot(y,dy) for each matrix column
*/
template <size_t bsize, typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
const TensorDataType* __restrict__ dot_products,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& y_dot_dy = dot_products[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row+col*output_ldim];
const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim];
dx = y * (dy - y_dot_dy);
}
}
}
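/* Note added for clarity (not part of the original file): the update above
 * follows from differentiating the softmax y_i = exp(x_i) / sum_j exp(x_j),
 * whose Jacobian is dy_i/dx_k = y_i * (delta_ik - y_k). Hence
 *   dL/dx_k = sum_i dL/dy_i * y_i * (delta_ik - y_k)
 *           = y_k * (dL/dy_k - sum_i y_i * dL/dy_i),
 * i.e. dx = y * (dy - dot(y,dy)), with dot(y,dy) precomputed per column in
 * bp_dot_product_kernel.
 */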
#ifdef LBANN_HAS_DISTCONV
template <typename TensorDataType, data_layout Layout, El::Device Device>
void fp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) {
dc.m_softmax->forward(dc.get_prev_activations(), dc.get_activations());
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void bp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) {
dc.m_softmax->backward(dc.get_activations(),
dc.get_prev_error_signals(),
dc.get_error_signals());
}
#endif // LBANN_HAS_DISTCONV
} // namespace
template <typename TensorDataType>
void fp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
#ifdef LBANN_HAS_DISTCONV
if (l.distconv_enabled()) {
fp_compute_distconv(l.get_distconv_adapter());
return;
}
#endif // LBANN_HAS_DISTCONV
const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
if (!local_input.IsEmpty()) {
dnn_lib::softmax_forward(one,
l.m_tensors_dnn_desc.get_prev_activations(),
local_input,
zero,
l.m_tensors_dnn_desc.get_activations(),
local_output,
l.m_mode);
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
gpu_lib::apply_entrywise_unary_operator<threshold_op>(local_output,
local_output);
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
}
}
template <typename TensorDataType>
void bp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
#ifdef LBANN_HAS_DISTCONV
if (l.distconv_enabled()) {
bp_compute_distconv(l.get_distconv_adapter());
return;
}
#endif // LBANN_HAS_DISTCONV
const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals());
dnn_lib::softmax_backward(one,
l.m_tensors_dnn_desc.get_activations(),
local_output,
l.m_tensors_dnn_desc.get_prev_error_signals(),
local_gradient_wrt_output,
zero,
l.m_tensors_dnn_desc.get_error_signals(),
local_gradient_wrt_input,
l.m_mode);
}
template <typename TensorDataType>
void fp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
if(l.m_mode != softmax_mode::INSTANCE) {
LBANN_ERROR("Unsupported softmax mode");
}
// Local matrices
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix());
const size_t local_height = local_input.Height();
const size_t local_width = local_input.Width();
// GPU objects
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
gpu::get_sync_info(local_output),
gpu::get_sync_info(local_workspace));
// The comm templates will not convert the multisync, so cast the multisync
// and use sync_info for comms.
El::SyncInfo<El::Device::GPU> const& sync_info = multisync;
// Find max value in each column
gpu_lib::thrust::vector<TensorDataType> max_vals;
if (local_output.IsEmpty()) {
max_vals.resize(local_width,
-std::numeric_limits<TensorDataType>::infinity());
}
else {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
max_vals.resize(grid_dims.x * local_width);
hydrogen::gpu::LaunchKernel(
reduce_max_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
max_vals.data().get());
while (grid_dims.x > 1) {
const size_t prev_height = grid_dims.x;
grid_dims.x = (prev_height + block_size - 1) / block_size;
gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
max_vals.resize(grid_dims.x * local_width);
hydrogen::gpu::LaunchKernel(
reduce_max_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
prev_height, local_width,
prev_vals.data().get(), prev_height,
max_vals.data().get());
}
}
El::mpi::AllReduce(max_vals.data().get(), max_vals.size(),
El::mpi::MAX, l.m_workspace->RedundantComm(),
sync_info);
// Compute exp(x-max_val) and sum(exp(x-max_val))
El::Zero(*l.m_workspace);
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
fp_exp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
max_vals.data().get(),
local_workspace.Buffer());
}
El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute output
// Note: y = exp(x-max_val) / sum(exp(x-max_val))
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
fp_output_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_output.Buffer(), local_output.LDim(),
local_workspace.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
if(l.m_mode != softmax_mode::INSTANCE) {
LBANN_ERROR("Unsupported softmax mode");
}
// Local matrices
const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals());
auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix());
const auto& local_height = local_output.Height();
const auto& local_width = local_output.Width();
// GPU objects
auto multisync = El::MakeMultiSync(
gpu::get_sync_info(local_output),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_workspace));
// Compute dot(y,dy)
El::Zero(local_workspace);
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_dot_product_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer());
}
El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute gradient w.r.t. input
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim());
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::setup_fp_dnn_descriptors()
{
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::setup_bp_dnn_descriptors()
{
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::fp_compute() {
fp_compute_impl(*this);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::bp_compute() {
bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| d5c974eefa99f3ed89eb2d2fbeda61505171a7cc.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/activations/softmax.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#ifdef LBANN_HAS_DNN_LIB
#include "lbann/utils/dnn_lib/softmax.hpp"
#endif // LBANN_HAS_DNN_LIB
namespace lbann {
namespace {
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
/** Functor to ensure values are above threshold value */
template <typename TensorDataType>
struct threshold_op {
__forceinline__ __device__ TensorDataType operator()(const TensorDataType& y) const {
return gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
}
};
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
/** @brief Max functor */
template <class T>
struct max_op {
__device__ __forceinline__
DataType operator()(const T& x1, const T& x2) const {
return gpu_lib::max(x1, x2);
}
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ values,
size_t values_ldim,
TensorDataType* __restrict__ max_values) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Find largest value for each thread
TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& val = values[row+col*values_ldim];
thread_max_val = gpu_lib::max(thread_max_val, val);
}
// Find largest value for each block
const TensorDataType block_max_val
= gpu_lib::block_reduce<bsize,1,1,DataType,max_op<DataType>>(thread_max_val);
if (tid == 0) {
max_values[bidx+col*nblocksx] = block_max_val;
}
}
}
/** @brief Compute exp(x-shift)
*
* Also compute sum(exp(x-shift)) for each matrix column.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_exp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ shifts,
TensorDataType* __restrict__ sums) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
const auto& shift = shifts[col];
// Exponentiate inputs and compute sum for each thread
TensorDataType thread_sum{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& x = input[row+col*input_ldim];
auto& y = output[row+col*output_ldim];
y = gpu_lib::exp(x-shift);
thread_sum += y;
}
// Compute sum for each block
const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum);
if (tid == 0) {
gpu_lib::atomic_add(&sums[col], block_sum);
}
}
}
/** @brief Compute layer output
*
* y = exp(x-shift) / sum(exp(x-shift))
*
* If @c LBANN_ENABLE_SOFTMAX_THRESHOLD is set, small values are
* thresholded to a minimum value to avoid denormalized floats.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
 * @param output On input, contains exp(x-shift). On output,
* contains the layer output.
* @param sums sum(exp(x-shift)) for each column
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
size_t width,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ sums) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& denom = sums[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
auto& y = output[row+col*output_ldim];
y /= denom;
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
y = gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
}
}
}
/** @brief Compute dot(y,dy) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void bp_dot_product_kernel(
size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ dot_products) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Compute dot product contribution for each thread
TensorDataType thread_dot_product{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row+col*output_ldim];
const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
thread_dot_product += y * dy;
}
// Compute dot product contribution for each block
const TensorDataType block_dot_product
= gpu_lib::block_reduce<bsize,1,1>(thread_dot_product);
if (tid == 0) {
gpu_lib::atomic_add(&dot_products[col], block_dot_product);
}
}
}
/** @brief Compute gradient w.r.t. input
*
* dx = y * (dy - dot(y,dy))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param dot_products dot(y,dy) for each matrix column
*/
template <size_t bsize, typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
const TensorDataType* __restrict__ dot_products,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& y_dot_dy = dot_products[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row+col*output_ldim];
const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim];
dx = y * (dy - y_dot_dy);
}
}
}
#ifdef LBANN_HAS_DISTCONV
template <typename TensorDataType, data_layout Layout, El::Device Device>
void fp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) {
dc.m_softmax->forward(dc.get_prev_activations(), dc.get_activations());
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void bp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) {
dc.m_softmax->backward(dc.get_activations(),
dc.get_prev_error_signals(),
dc.get_error_signals());
}
#endif // LBANN_HAS_DISTCONV
} // namespace
template <typename TensorDataType>
void fp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
#ifdef LBANN_HAS_DISTCONV
if (l.distconv_enabled()) {
fp_compute_distconv(l.get_distconv_adapter());
return;
}
#endif // LBANN_HAS_DISTCONV
const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
if (!local_input.IsEmpty()) {
dnn_lib::softmax_forward(one,
l.m_tensors_dnn_desc.get_prev_activations(),
local_input,
zero,
l.m_tensors_dnn_desc.get_activations(),
local_output,
l.m_mode);
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
gpu_lib::apply_entrywise_unary_operator<threshold_op>(local_output,
local_output);
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
}
}
template <typename TensorDataType>
void bp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
#ifdef LBANN_HAS_DISTCONV
if (l.distconv_enabled()) {
bp_compute_distconv(l.get_distconv_adapter());
return;
}
#endif // LBANN_HAS_DISTCONV
const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals());
dnn_lib::softmax_backward(one,
l.m_tensors_dnn_desc.get_activations(),
local_output,
l.m_tensors_dnn_desc.get_prev_error_signals(),
local_gradient_wrt_output,
zero,
l.m_tensors_dnn_desc.get_error_signals(),
local_gradient_wrt_input,
l.m_mode);
}
template <typename TensorDataType>
void fp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
if(l.m_mode != softmax_mode::INSTANCE) {
LBANN_ERROR("Unsupported softmax mode");
}
// Local matrices
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix());
const size_t local_height = local_input.Height();
const size_t local_width = local_input.Width();
// GPU objects
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
gpu::get_sync_info(local_output),
gpu::get_sync_info(local_workspace));
// The comm templates will not convert the multisync, so cast the multisync
// and use sync_info for comms.
El::SyncInfo<El::Device::GPU> const& sync_info = multisync;
// Find max value in each column
gpu_lib::thrust::vector<TensorDataType> max_vals;
if (local_output.IsEmpty()) {
max_vals.resize(local_width,
-std::numeric_limits<TensorDataType>::infinity());
}
else {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
max_vals.resize(grid_dims.x * local_width);
hydrogen::gpu::LaunchKernel(
reduce_max_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
max_vals.data().get());
while (grid_dims.x > 1) {
const size_t prev_height = grid_dims.x;
grid_dims.x = (prev_height + block_size - 1) / block_size;
gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
max_vals.resize(grid_dims.x * local_width);
hydrogen::gpu::LaunchKernel(
reduce_max_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
prev_height, local_width,
prev_vals.data().get(), prev_height,
max_vals.data().get());
}
}
El::mpi::AllReduce(max_vals.data().get(), max_vals.size(),
El::mpi::MAX, l.m_workspace->RedundantComm(),
sync_info);
// Compute exp(x-max_val) and sum(exp(x-max_val))
El::Zero(*l.m_workspace);
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
fp_exp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
max_vals.data().get(),
local_workspace.Buffer());
}
El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute output
// Note: y = exp(x-max_val) / sum(exp(x-max_val))
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
fp_output_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_output.Buffer(), local_output.LDim(),
local_workspace.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
if(l.m_mode != softmax_mode::INSTANCE) {
LBANN_ERROR("Unsupported softmax mode");
}
// Local matrices
const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals());
auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix());
const auto& local_height = local_output.Height();
const auto& local_width = local_output.Width();
// GPU objects
auto multisync = El::MakeMultiSync(
gpu::get_sync_info(local_output),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_workspace));
// Compute dot(y,dy)
El::Zero(local_workspace);
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_dot_product_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer());
}
El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute gradient w.r.t. input
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim());
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::setup_fp_dnn_descriptors()
{
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::setup_bp_dnn_descriptors()
{
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::fp_compute() {
fp_compute_impl(*this);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::bp_compute() {
bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
a4fca9e94d1c8372f153ab9a611b2eecbcb97150.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************************
* BenchIT - Performance Measurement for Scientific Applications
* Contact: [email protected]
*
* For license details see COPYING in the package base directory
*******************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "interface.h"
/* Header for local functions
*/
#include "work.h"
/** These variables will help us to keep the overview over the arrays
* we access for our functions/data.
*/
/* Number of different ways an algorithm will be measured.
Example: loop orders: ijk, ikj, jki, jik, kij, kji -> functionCount=6 with
each different loop order in an own function. */
int functionCount = 1; //Read
/* Number of fixed functions we have per measurement.
Example: execution time and MFLOPS are measured for each loop order
-> valuesPerFunction=2 */
int valuesPerFunction; //maxThreads-minThreads
int minThreads;
int maxThreads;
int arrayLength;
int arraySize;
int iterations;
/* Header for local functions
*/
void evaluate_environment(void);
/** The implementation of the bi_getinfo from the BenchIT interface.
* Here the infostruct is filled with information about the
* kernel.
* @param infostruct a pointer to a structure filled with zeros
*/
void bi_getinfo(bi_info * infostruct)
{
int index1;
/* get environment variables for the kernel */
evaluate_environment();
infostruct->codesequence = bi_strdup("collisionRead(stride)");
infostruct->xaxistext = bi_strdup("Stride");
infostruct->num_measurements = infostruct->listsize;
infostruct->num_processes = 1;
infostruct->num_threads_per_process = 0;
infostruct->kernel_execs_mpi1 = 0;
infostruct->kernel_execs_mpi2 = 0;
infostruct->kernel_execs_pvm = 0;
infostruct->kernel_execs_omp = 0;
infostruct->kernel_execs_pthreads = 0;
valuesPerFunction = maxThreads-minThreads+1;
infostruct->numfunctions = functionCount * valuesPerFunction;
/* allocating memory for y axis texts and properties */
allocYAxis(infostruct);
/* setting up y axis texts and properties */
for(index1 = 0; index1 < infostruct->numfunctions; index1++){
infostruct->yaxistexts[index1] = bi_strdup("Cycles");
infostruct->selected_result[index1] = SELECT_RESULT_LOWEST;
infostruct->base_yaxis[index1] = 0;
}
for(index1 = 0; index1 < valuesPerFunction; index1++){
char str[30];
int numThreads = (int)pow(2,minThreads+index1);
sprintf(str, "Read latency #%i", numThreads);
infostruct->legendtexts[index1] = bi_strdup(str);
}
}
/** Implementation of the bi_init of the BenchIT interface.
* Here you have the chance to allocate the memory you need.
* It is also possible to allocate the memory at the beginning
* of every single measurement and to free the memory thereafter.
* But always making use of the same memory is faster.
* HAVE A LOOK INTO THE HOWTO !
*/
void* bi_init(int problemSizemax)
{
mydata_t* mdp;
mdp = (mydata_t*)malloc(sizeof(mydata_t));
if (mdp == 0)
{
fprintf(stderr, "Allocation of structure mydata_t failed\n"); fflush(stderr);
exit(127);
}
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeFourByte);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
mdp->h_array = (uint*) malloc(arraySize);
//Add 1 dummy value
CUDA_CHECK(hipMalloc((void**)&(mdp->d_array),arraySize+sizeof(uint)));
mdp->h_duration = (uint*) malloc(2*sizeof(uint));
CUDA_CHECK(hipMalloc((void**)&(mdp->d_duration),2*sizeof(uint)));
return (void*)mdp;
}
/** The central function within each kernel. This function
 *   is called for each measurement step separately.
* @param mdpv a pointer to the structure created in bi_init,
* it is the pointer the bi_init returns
* @param problemSize the actual problemSize
* @param results a pointer to a field of doubles, the
* size of the field depends on the number
* of functions, there are #functions+1
* doubles
 *   @return 0 if the measurement was successful, something
* else in the case of an error
*/
int bi_entry(void* mdpv, int problemSize, double* results)
{
int i;
/* cast void* pointer */
mydata_t* mdp = (mydata_t*)mdpv;
/* calculate real problemSize */
problemSize = bi_get_list_element(problemSize);
// the xaxis value needs to be stored only once!
results[0] = (double)problemSize;
for(i = 0; i < arrayLength; i++)
mdp->h_array[i] = (i + 1) % arrayLength;
CUDA_CHECK(hipMemcpy(mdp->d_array,mdp->h_array,arraySize,hipMemcpyHostToDevice));
for(i=0; i < valuesPerFunction;i++){
      // 1 block of 32 threads --> 1 warp
int numThreads=pow(2,minThreads+i);
dim3 dimBlock(numThreads);
dim3 dimGrid(1);
hipLaunchKernelGGL(( CUDA_CHECK_KERNEL_SYNC(collisionRead), dim3(dimGrid), dim3(dimBlock), arraySize, 0, mdp->d_array, arrayLength, problemSize, iterations, mdp->d_duration));
CUDA_CHECK(hipMemcpy(mdp->h_duration,mdp->d_duration,2*sizeof(uint),hipMemcpyDeviceToHost));
results[i+1]=mdp->h_duration[0] / (256 * iterations);
}
return 0;
}
// Clean up the memory
void bi_cleanup(void* mdpv)
{
mydata_t* mdp = (mydata_t*)mdpv;
free(mdp->h_array);
free(mdp->h_duration);
CUDA_CHECK(hipFree(mdp->d_array));
CUDA_CHECK(hipFree(mdp->d_duration));
free(mdp);
return;
}
/********************************************************************/
/*************** End of interface implementations *******************/
/********************************************************************/
/* Reads the environment variables used by this kernel. */
void evaluate_environment()
{
int errors = 0;
char * p = 0;
p = bi_getenv("BENCHIT_KERNEL_PROBLEMLIST", 0);
if(p==0) errors++;
else bi_parselist(p);
p = bi_getenv("BENCHIT_KERNEL_MINTHREADS", 0);
if(p==0) errors++;
else minThreads=atoi(p);
p = bi_getenv("BENCHIT_KERNEL_MAXTHREADS", 0);
if(p==0) errors++;
else maxThreads=atoi(p);
p = bi_getenv("BENCHIT_KERNEL_ARRAYLENGTH", 0);
if(p==0) errors++;
else arrayLength = atoi(p);
arraySize = arrayLength * sizeof(uint);
p = bi_getenv("BENCHIT_KERNEL_ITERATIONS", 0);
if(p==0) errors++;
else iterations = atoi(p);
if (errors > 0)
{
fprintf(stderr, "There's at least one environment variable not set!\n");
fprintf(stderr, "This kernel needs the following environment variables:\n");
fprintf(stderr, "BENCHIT_KERNEL_PROBLEMLIST\n");
fprintf(stderr, "BENCHIT_KERNEL_MINTHREADS\n");
fprintf(stderr, "BENCHIT_KERNEL_MAXTHREADS\n");
fprintf(stderr, "BENCHIT_KERNEL_ARRAYSIZE\n");
fprintf(stderr, "BENCHIT_KERNEL_ITERATIONS\n");
exit(1);
}
}
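/* Illustrative note, not part of the original kernel: the measurement is
 * configured entirely through the environment variables read above. The
 * example values below are assumptions for a quick run, not shipped defaults:
 *
 *   export BENCHIT_KERNEL_PROBLEMLIST=1-32     # strides to measure
 *   export BENCHIT_KERNEL_MINTHREADS=0         # 2^0 = 1 thread
 *   export BENCHIT_KERNEL_MAXTHREADS=5         # 2^5 = 32 threads (one warp)
 *   export BENCHIT_KERNEL_ARRAYLENGTH=2048     # number of uint elements
 *   export BENCHIT_KERNEL_ITERATIONS=100       # repetitions per measurement
 */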
| a4fca9e94d1c8372f153ab9a611b2eecbcb97150.cu | /********************************************************************
* BenchIT - Performance Measurement for Scientific Applications
* Contact: [email protected]
*
* For license details see COPYING in the package base directory
*******************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "interface.h"
/* Header for local functions
*/
#include "work.h"
/** These variables will help us to keep the overview over the arrays
* we access for our functions/data.
*/
/* Number of different ways an algorithm will be measured.
Example: loop orders: ijk, ikj, jki, jik, kij, kji -> functionCount=6 with
each different loop order in an own function. */
int functionCount = 1; //Read
/* Number of fixed functions we have per measurement.
Example: execution time and MFLOPS are measured for each loop order
-> valuesPerFunction=2 */
int valuesPerFunction; //maxThreads-minThreads
int minThreads;
int maxThreads;
int arrayLength;
int arraySize;
int iterations;
/* Header for local functions
*/
void evaluate_environment(void);
/** The implementation of the bi_getinfo from the BenchIT interface.
* Here the infostruct is filled with information about the
* kernel.
* @param infostruct a pointer to a structure filled with zeros
*/
void bi_getinfo(bi_info * infostruct)
{
int index1;
/* get environment variables for the kernel */
evaluate_environment();
infostruct->codesequence = bi_strdup("collisionRead(stride)");
infostruct->xaxistext = bi_strdup("Stride");
infostruct->num_measurements = infostruct->listsize;
infostruct->num_processes = 1;
infostruct->num_threads_per_process = 0;
infostruct->kernel_execs_mpi1 = 0;
infostruct->kernel_execs_mpi2 = 0;
infostruct->kernel_execs_pvm = 0;
infostruct->kernel_execs_omp = 0;
infostruct->kernel_execs_pthreads = 0;
valuesPerFunction = maxThreads-minThreads+1;
infostruct->numfunctions = functionCount * valuesPerFunction;
/* allocating memory for y axis texts and properties */
allocYAxis(infostruct);
/* setting up y axis texts and properties */
for(index1 = 0; index1 < infostruct->numfunctions; index1++){
infostruct->yaxistexts[index1] = bi_strdup("Cycles");
infostruct->selected_result[index1] = SELECT_RESULT_LOWEST;
infostruct->base_yaxis[index1] = 0;
}
for(index1 = 0; index1 < valuesPerFunction; index1++){
char str[30];
int numThreads = (int)pow(2,minThreads+index1);
sprintf(str, "Read latency #%i", numThreads);
infostruct->legendtexts[index1] = bi_strdup(str);
}
}
/** Implementation of the bi_init of the BenchIT interface.
* Here you have the chance to allocate the memory you need.
* It is also possible to allocate the memory at the beginning
* of every single measurement and to free the memory thereafter.
* But always making use of the same memory is faster.
* HAVE A LOOK INTO THE HOWTO !
*/
void* bi_init(int problemSizemax)
{
mydata_t* mdp;
mdp = (mydata_t*)malloc(sizeof(mydata_t));
if (mdp == 0)
{
fprintf(stderr, "Allocation of structure mydata_t failed\n"); fflush(stderr);
exit(127);
}
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
mdp->h_array = (uint*) malloc(arraySize);
//Add 1 dummy value
CUDA_CHECK(cudaMalloc((void**)&(mdp->d_array),arraySize+sizeof(uint)));
mdp->h_duration = (uint*) malloc(2*sizeof(uint));
CUDA_CHECK(cudaMalloc((void**)&(mdp->d_duration),2*sizeof(uint)));
return (void*)mdp;
}
/** The central function within each kernel. This function
 *   is called for each measurement step separately.
* @param mdpv a pointer to the structure created in bi_init,
* it is the pointer the bi_init returns
* @param problemSize the actual problemSize
* @param results a pointer to a field of doubles, the
* size of the field depends on the number
* of functions, there are #functions+1
* doubles
 *   @return 0 if the measurement was successful, something
* else in the case of an error
*/
int bi_entry(void* mdpv, int problemSize, double* results)
{
int i;
/* cast void* pointer */
mydata_t* mdp = (mydata_t*)mdpv;
/* calculate real problemSize */
problemSize = bi_get_list_element(problemSize);
// the xaxis value needs to be stored only once!
results[0] = (double)problemSize;
for(i = 0; i < arrayLength; i++)
mdp->h_array[i] = (i + 1) % arrayLength;
CUDA_CHECK(cudaMemcpy(mdp->d_array,mdp->h_array,arraySize,cudaMemcpyHostToDevice));
for(i=0; i < valuesPerFunction;i++){
      // 1 block of 32 threads --> 1 warp
int numThreads=pow(2,minThreads+i);
dim3 dimBlock(numThreads);
dim3 dimGrid(1);
CUDA_CHECK_KERNEL_SYNC(collisionRead<<<dimGrid, dimBlock, arraySize>>>(mdp->d_array, arrayLength, problemSize, iterations, mdp->d_duration));
CUDA_CHECK(cudaMemcpy(mdp->h_duration,mdp->d_duration,2*sizeof(uint),cudaMemcpyDeviceToHost));
results[i+1]=mdp->h_duration[0] / (256 * iterations);
}
return 0;
}
// Clean up the memory
void bi_cleanup(void* mdpv)
{
mydata_t* mdp = (mydata_t*)mdpv;
free(mdp->h_array);
free(mdp->h_duration);
CUDA_CHECK(cudaFree(mdp->d_array));
CUDA_CHECK(cudaFree(mdp->d_duration));
free(mdp);
return;
}
/********************************************************************/
/*************** End of interface implementations *******************/
/********************************************************************/
/* Reads the environment variables used by this kernel. */
void evaluate_environment()
{
int errors = 0;
char * p = 0;
p = bi_getenv("BENCHIT_KERNEL_PROBLEMLIST", 0);
if(p==0) errors++;
else bi_parselist(p);
p = bi_getenv("BENCHIT_KERNEL_MINTHREADS", 0);
if(p==0) errors++;
else minThreads=atoi(p);
p = bi_getenv("BENCHIT_KERNEL_MAXTHREADS", 0);
if(p==0) errors++;
else maxThreads=atoi(p);
p = bi_getenv("BENCHIT_KERNEL_ARRAYLENGTH", 0);
if(p==0) errors++;
else arrayLength = atoi(p);
arraySize = arrayLength * sizeof(uint);
p = bi_getenv("BENCHIT_KERNEL_ITERATIONS", 0);
if(p==0) errors++;
else iterations = atoi(p);
if (errors > 0)
{
fprintf(stderr, "There's at least one environment variable not set!\n");
fprintf(stderr, "This kernel needs the following environment variables:\n");
fprintf(stderr, "BENCHIT_KERNEL_PROBLEMLIST\n");
fprintf(stderr, "BENCHIT_KERNEL_MINTHREADS\n");
fprintf(stderr, "BENCHIT_KERNEL_MAXTHREADS\n");
fprintf(stderr, "BENCHIT_KERNEL_ARRAYSIZE\n");
fprintf(stderr, "BENCHIT_KERNEL_ITERATIONS\n");
exit(1);
}
}
|
8bd82c0f7ff361e5f1c185405220036746ea2aa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "b_kernel.hu"
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<time.h>
int main()
{
int a[]={1,2,3,4,5,6};
int b[]={1,2,3,4,5,6};
int c[4];
int i;
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
{
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
int *dev_a;
int *dev_b;
int *dev_c;
cudaCheckReturn(hipMalloc((void **) &dev_a, (4) * sizeof(int)));
cudaCheckReturn(hipMalloc((void **) &dev_b, (4) * sizeof(int)));
cudaCheckReturn(hipMalloc((void **) &dev_c, (4) * sizeof(int)));
cudaCheckReturn(hipMemcpy(dev_a, a, (4) * sizeof(int), hipMemcpyHostToDevice));
cudaCheckReturn(hipMemcpy(dev_b, b, (4) * sizeof(int), hipMemcpyHostToDevice));
{
dim3 k0_dimBlock(4);
dim3 k0_dimGrid(1);
hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_a, dev_b, dev_c);
cudaCheckKernel();
}
cudaCheckReturn(hipMemcpy(c, dev_c, (4) * sizeof(int), hipMemcpyDeviceToHost));
cudaCheckReturn(hipFree(dev_a));
cudaCheckReturn(hipFree(dev_b));
cudaCheckReturn(hipFree(dev_c));
}
for(i=0;i<4;i++)
printf("%d\n",c[i]);
return 0;
}
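/* Illustrative sketch, not part of this file: kernel0 is defined in
 * b_kernel.hu, which is not shown here, so its real body is unknown. A
 * minimal element-wise kernel consistent with this launcher (one block of
 * four threads operating on three int arrays of length 4) could look like
 * the commented-out version below; the addition is purely an assumption.
 */
// __global__ void kernel0(int *a, int *b, int *c)
// {
//     int t = threadIdx.x;      // one of the 4 threads handles one element
//     if (t < 4)
//         c[t] = a[t] + b[t];
// }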
| 8bd82c0f7ff361e5f1c185405220036746ea2aa3.cu | #include <assert.h>
#include <stdio.h>
#include "b_kernel.hu"
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<time.h>
int main()
{
int a[]={1,2,3,4,5,6};
int b[]={1,2,3,4,5,6};
int c[4];
int i;
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
{
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
int *dev_a;
int *dev_b;
int *dev_c;
cudaCheckReturn(cudaMalloc((void **) &dev_a, (4) * sizeof(int)));
cudaCheckReturn(cudaMalloc((void **) &dev_b, (4) * sizeof(int)));
cudaCheckReturn(cudaMalloc((void **) &dev_c, (4) * sizeof(int)));
cudaCheckReturn(cudaMemcpy(dev_a, a, (4) * sizeof(int), cudaMemcpyHostToDevice));
cudaCheckReturn(cudaMemcpy(dev_b, b, (4) * sizeof(int), cudaMemcpyHostToDevice));
{
dim3 k0_dimBlock(4);
dim3 k0_dimGrid(1);
kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_a, dev_b, dev_c);
cudaCheckKernel();
}
cudaCheckReturn(cudaMemcpy(c, dev_c, (4) * sizeof(int), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaFree(dev_a));
cudaCheckReturn(cudaFree(dev_b));
cudaCheckReturn(cudaFree(dev_c));
}
for(i=0;i<4;i++)
printf("%d\n",c[i]);
return 0;
}
|
f2c427909d7336836f357a1fef9e879e845706cf.hip | // !!! This is a file automatically generated by hipify!!!
/**
   N-Queens solver, CUDA version
   ([email protected])
   Build and run: nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g)
                    -c: CPU, non-recursive
                    -r: CPU, recursive
                    -g: GPU
 $ nvcc -O3 CUDA13_N-Queen.cu && ./a.out -g
 GPU (CUDA) results:
N: Total Unique dd:hh:mm:ss.ms
4: 2 1 00:00:00:00.37
5: 10 2 00:00:00:00.00
6: 4 1 00:00:00:00.00
7: 40 6 00:00:00:00.00
8: 92 12 00:00:00:00.01
9: 352 46 00:00:00:00.01
10: 724 92 00:00:00:00.01
11: 2680 341 00:00:00:00.01
12: 14200 1787 00:00:00:00.02
13: 73712 9233 00:00:00:00.03
14: 365596 45752 00:00:00:00.03
15: 2279184 285053 00:00:00:00.04
16: 14772512 1846955 00:00:00:00.08
17: 95815104 11977939 00:00:00:00.35
18: 666090624 83263591 00:00:00:02.60
19: 4968057848 621012754 00:00:00:22.23
20: 39029188884 4878666808 00:00:03:26.80
21: 314666222712 39333324973 00:00:33:09.52
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <pthread.h>
#define THREAD_NUM 96
#define MAX 27
//
/**
  Selects the CPU solver variant (CPU/CPUR):
  non-recursive or recursive
  0: non-recursive
  1: recursive
*/
int NR;
/**
*/
long TOTAL=0;
long UNIQUE=0;
//
//
// Parameters shared with the pthread worker thread
//
typedef struct {
int size;
int sizeE;
long lTOTAL,lUNIQUE;
}GCLASS, *GClass;
GCLASS G;
//
//
typedef struct{
int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
int mask;
int aBoard[MAX];
long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX];
}local ;
//
__device__ __host__
int symmetryOps(int si,unsigned int *d_aBoard,int BOUND1,int BOUND2,int TOPBIT,int ENDBIT)
{
int own,ptn,you,bit;
  //Check 90-degree rotational symmetry
if(d_aBoard[BOUND2]==1){ own=1; ptn=2;
while(own<=si-1){ bit=1; you=si-1;
while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you--; }
if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; }
own++; ptn<<=1;
}
    /** Symmetric under 90 degrees (hence also under 180 and 270): class of 2 */
if(own>si-1){ return 2; }
}
  //Check 180-degree rotational symmetry
if(d_aBoard[si-1]==ENDBIT){ own=1; you=si-1-1;
while(own<=si-1){ bit=1; ptn=TOPBIT;
while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; ptn>>=1; }
if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; }
own++; you--;
}
    /** Not symmetric under 90 degrees but symmetric under 180 degrees: class of 4 */
if(own>si-1){ return 4; }
}
  //Check 270-degree rotational symmetry
if(d_aBoard[BOUND1]==TOPBIT){ own=1; ptn=TOPBIT>>1;
while(own<=si-1){ bit=1; you=0;
while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you++; }
if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; }
own++; ptn>>=1;
}
}
return 8;
}
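/* Note added for clarity (not part of the original source): symmetryOps
   classifies a canonical solution by its rotational symmetry.
     return 0 : board is not the canonical representative of its class -> skip
     return 2 : invariant under 90-degree rotation  -> class of 2 distinct boards
     return 4 : invariant under 180-degree rotation -> class of 4 distinct boards
     return 8 : no rotational symmetry              -> class of 8 distinct boards
   The caller adds the return value to the total count and 1 to the unique
   count, so the weighted sum over unique solutions reproduces TOTAL. */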
//
__global__
void cuda_kernel_b1(
register int size,
register int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
unsigned int* d_uniq,
register int totalCond,
/**11 backTrack1aBoard*********************/
//unsigned int* t_aBoard,
register int h_row,
/**11 BOUND1*********************/
int B1
)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
register unsigned int unique=0;
register int row=0;
register unsigned int bit;
//
//
//
  // thread index within the block
  register unsigned const int tid=threadIdx.x;
  // block index
  register unsigned const int bid=blockIdx.x;
  // global thread index
register unsigned const int idx=bid*blockDim.x+tid;
//
//
//
//shared
//10mask
//GPU10
//THREAD_NUM
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
  //bitmap = squares not yet attacked via the down, left and right masks
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
/***11 backTrack1aBoard *********************/
//unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//
  //Only the first totalCond threads have a partial state prepared on the CPU
if(idx<totalCond){
    //Restore this thread's partial search state (totalDown/totalLeft/totalRight,
    //prepared on the CPU) into the down/left/right shared arrays, indexed by idx,
    //and continue the search from that row.
/***11 backTrack1aBoard*********************/
//for(int i=0;i<h_row;i++){
// c_aBoard[i]=t_aBoard[idx*h_row+i]; //1
//}
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
if(bitmap_tid_row==0){
row--;
}else{
/**11 **********/
if(row+h_row<B1) {
bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2)
}
//
//
bitmap[tid][row]
/***11 backTrack1aBoard*********************/
//^=c_aBoard[row+h_row]
//=bit
^=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
          //Was a free square found in this row?
//
if(row+1==mark){
            /**11 backTrack1 symmetryOps*********************/
//int s=symmetryOps(size,c_aBoard);
//if(s!=0){
            //print(size); //the CPU version would print the board here
            //In backTrack1 every completed board is already unique, so count it directly:
//
unique++;
            total+=8; //each unique solution found here stands for 8 symmetric boards
//}
row--;
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//No free square: go back up one row
row--;
}
}
}
//Finally store the per-thread counts in sum[tid]/usum[tid]
sum[tid]=total;
usum[tid]=unique;
}else{
//Threads beyond totalCond idle and contribute nothing to total
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads() synchronizes the threads of this block:
//every thread waits until all of them reach the barrier.
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();if(tid==0){
d_results[bid]=sum[0];
d_uniq[bid]=usum[0];
}
}
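/*
Minimal sketch of the block-level reduction used at the end of cuda_kernel_b1,
applied to a plain array. It assumes blockDim.x==THREAD_NUM (96 in this file,
so THREAD_NUM<=128 must hold for the first folding step). The kernel name is
hypothetical and it is not called by the solver.
*/
__global__ void reduce_per_block_sketch(const unsigned int *in,unsigned int *out)
{
__shared__ unsigned int buf[THREAD_NUM];
const unsigned int tid=threadIdx.x;
buf[tid]=in[blockIdx.x*blockDim.x+tid];
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){ buf[tid]+=buf[tid+64]; }
__syncwarp();if(tid<32){ buf[tid]+=buf[tid+32]; }
__syncwarp();if(tid<16){ buf[tid]+=buf[tid+16]; }
__syncwarp();if(tid<8){ buf[tid]+=buf[tid+8]; }
__syncwarp();if(tid<4){ buf[tid]+=buf[tid+4]; }
__syncwarp();if(tid<2){ buf[tid]+=buf[tid+2]; }
__syncwarp();if(tid<1){ buf[tid]+=buf[tid+1]; }
if(tid==0){ out[blockIdx.x]=buf[0]; }
}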
//
//
/***11 cuda_kernel_b2*********************/
__global__
void cuda_kernel_b2(
register int size,
register int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
unsigned int* d_uniq,
register int totalCond,
unsigned int* t_aBoard,
register int h_row,
register int B1,
register int B2,
register int SM,
register int LM,
/***12 symmetryOps TOPBIT,ENDBIT*****/
register int TB,
register int EB
)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
register unsigned int unique=0;
register int row=0;
register unsigned int bit;
//
//Thread indexing
//
//Thread ID within the block
register unsigned const int tid=threadIdx.x;
//Block ID within the grid
register unsigned const int bid=blockIdx.x;
//Global thread ID
register unsigned const int idx=bid*blockDim.x+tid;
//
//Shared memory
//
//Shared arrays are shared by the threads of one block.
//The second dimension is fixed at 10 because, with the current mask setup,
//the GPU handles at most 10 rows.
//THREAD_NUM is the number of threads per block.
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//Derive the bitmap of free columns from down, left and right
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//Idle the surplus threads:
//the GPU launches `steps` threads, but only totalCond of them do real work.
if(idx<totalCond){
//Unpack totalDown,totalLeft,totalRight into down,left,right.
//The host packed `steps` entries, but inside a block we are limited to the
//threads of that block, so indexing by idx is sufficient.
for(int i=0;i<h_row;i++){
c_aBoard[i]=t_aBoard[idx*h_row+i]; //2-D data laid out flat in a 1-D array
}
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
//bitmap[tid][row]==0: the queen cannot be placed anywhere,
//so go back up one row.
if(bitmap_tid_row==0){
row--;
}else{
/**11 Pruning**********/
//Upper-side pruning
if(row+h_row<B1){
//printf("BOUND1_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]);
bitmap_tid_row=bitmap[tid][row]&=~SM;
//Lower-side pruning
}else if(row+h_row==B2) {
//printf("BOUND2_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]);
if((down_tid_row&SM)==0){
row--;
continue;
//printf("BOUND2_row\n");
}
if((down_tid_row&SM)!=SM){
bitmap_tid_row=bitmap[tid][row]&=SM;
//printf("BOUND2_SIDEMASK\n");
}
}
int save_bitmap=bitmap[tid][row];
//Place a queen:
//is there a free square?
bitmap[tid][row]
^=c_aBoard[row+h_row]
=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//Last row? If we safely reached the row just before it,
//accumulate the counts.
if(row+1==mark){
/***11 LASTMASK pruning at the last row*********************/
if((save_bitmap&LM)==0){
/***12 symmetryOps BOUND1,BOUND2,TOPBIT,ENDBIT*****/
int s=symmetryOps(size,c_aBoard,B1,B2,TB,EB);
if(s!=0){
//print(size); //print() would not increment TOTAL here
//TOTAL goes back to the host through the results array
//(one element per thread before the block reduction)
unique++;
total+=s; //add the solutions obtained by symmetry expansion
}
row--;
}
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//No free square: go back up one row
row--;
}
}
}
//Finally store the per-thread counts in sum[tid]/usum[tid]
sum[tid]=total;
usum[tid]=unique;
}else{
//Threads beyond totalCond idle and contribute nothing to total
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads() synchronizes the threads of this block:
//every thread waits until all of them reach the barrier.
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();if(tid==0){
d_results[bid]=sum[0];
d_uniq[bid]=usum[0];
}
}
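/*
Hypothetical helper sketch: how the pruning masks consumed by cuda_kernel_b2
and backTrack2G are derived from the board size, mirroring the setup done in
NQueenG() further below. Not called by the original code.
*/
static void pruning_masks_sketch(int size,int *TOPBIT,int *ENDBIT,int *SIDEMASK,int *LASTMASK)
{
*TOPBIT=1<<(size-1);              //leftmost column of the first row
*SIDEMASK=*LASTMASK=(*TOPBIT|1);  //the two outer columns
*ENDBIT=(*TOPBIT>>1);             //column next to the left edge
}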
//
long backTrack2G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1,int BOUND2,int SIDEMASK,int LASTMASK,int TOPBIT,int ENDBIT,unsigned int* aBoard)
{
//From which row the GPU takes over. This setting can be changed; the larger
//the value, the more work runs in parallel on the GPU.
/***11 mark is 2 when size<8*********************/
unsigned int mark=size>12?size-10:3;
//unsigned int mark=size>11?size-9:3;
if(size<8){ mark=2; }
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//Holding bitmap as an array lets us step back one row
//without an explicit stack.
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* totalDown;
hipHostMalloc((void**) &totalDown,sizeof(int)*steps);
unsigned int* totalLeft;
hipHostMalloc((void**) &totalLeft,sizeof(int)*steps);
unsigned int* totalRight;
hipHostMalloc((void**) &totalRight,sizeof(int)*steps);
unsigned int* h_results;
hipHostMalloc((void**) &h_results,sizeof(int)*steps);
unsigned int* h_uniq;
hipHostMalloc((void**) &h_uniq,sizeof(int)*steps);
unsigned int* t_aBoard;
hipHostMalloc((void**) &t_aBoard,sizeof(int)*steps*mark);
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
unsigned int* d_uniq;
hipMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM);
unsigned int* d_aBoard;
hipMalloc((void**) &d_aBoard,sizeof(int)*steps*mark);
//Up to N=12 the CPU handles the first 3 rows: while row<mark the
//down,left,right state of those rows is stored into
//totalDown,totalLeft,totalRight, and the remaining rows are then solved
//on the GPU by many threads.
//From N=13 the number of rows handled by the CPU grows by one per N;
//for N=15, for example, the CPU runs up to row=5 and the GPU does the rest
//(with the current settings the GPU processes at most 10 rows).
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]==0: the queen cannot be placed anywhere,
//so go back up one row.
//06GPU: this branch ordering proved faster
if(bitmap[row]==0){ row--; }
else{//there is a free square, so go deeper
/***11 Pruning*********************/
//Upper-side pruning
if(row<BOUND1){
bitmap[row]&=~SIDEMASK;
//Lower-side pruning
}else if(row==BOUND2) {
if((down[row]&SIDEMASK)==0){ row--; }
if((down[row]&SIDEMASK)!=SIDEMASK){ bitmap[row]&=SIDEMASK; }
}
//06SGPU
bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//there is a free square: advance to the next row
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//Place queens one by one on row `mark`, record the down,left,right
//state, and do not descend further; once every feasible square of this
//row has been tried, the GPU runs all the states in parallel.
//totalCond becomes the thread id: each thread receives one
//down,left,right state. The state of row=2 (row=5 for n=15, growing
//with N) is stored into totalDown,totalLeft,totalRight.
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
for(int i=0;i<mark;i++){
t_aBoard[totalCond*mark+i]=aBoard[i];
}
//Increment the thread count
totalCond++;
//Once the batch is full, run the GPU here; `steps` controls how many
//GPU threads run concurrently.
//For small n, totalCond never exceeds steps, but it does as n grows.
//Only the totalCond==steps case enters this block.
if(totalCond==steps){
//Add the COUNTs when matched==true: counting happens inside the GPU,
//so matched is true once a previous kernel batch has run.
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(d_aBoard,t_aBoard,
sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
/***12 TOPBIT,ENDBIT*********************/
//cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK);
hipLaunchKernelGGL(( cuda_kernel_b2), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT);
//`steps` threads are launched, but only totalCond of them compute;
//the rest spin idle.
//Counting happens inside the GPU, so matched becomes true after the launch.
matched=true;
//After running the GPU on the totalCond==steps path, restart the thread
//counter from 0 (so the GPU can be launched again in batches of `steps`).
totalCond=0;
}
//After storing into totalDown,totalLeft,totalRight, go back up one row.
//Repeating this places a queen on every feasible square of the row and
//stores each state into totalDown,totalLeft,totalRight.
row--;
}
}else{
//No free square: go back up one row. Until row reaches mark the CPU
//just runs an ordinary N-Queens backtrack.
row--;
}
}
}
//Add the COUNTs when matched==true: counting happens inside the GPU,
//so matched is true once a previous kernel batch has run.
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(d_aBoard,t_aBoard,
sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
//size-mark is the number of rows the GPU executes; totalCond is the
//thread count. `steps` threads are launched, but only totalCond of them
//compute; the rest spin idle.
/***12 TOPBIT,ENDBIT*********************/
//cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK);
hipLaunchKernelGGL(( cuda_kernel_b2), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
//
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
hipFree(d_uniq);
hipFree(d_aBoard);
hipHostFree(totalDown);
hipHostFree(totalLeft);
hipHostFree(totalRight);
hipHostFree(h_results);
hipHostFree(h_uniq);
hipHostFree(t_aBoard);
return total;
}
//
long backTrack1G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1)
{
//From which row the GPU takes over. This setting can be changed; the larger
//the value, the more work runs in parallel on the GPU.
/***08 Queens are fixed on the first two rows, so mark must be at least 3*********************/
const unsigned int mark=size>12?size-10:3;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//Holding bitmap as an array lets us step back one row
//without an explicit stack.
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* totalDown;
hipHostMalloc((void**) &totalDown,sizeof(int)*steps);
unsigned int* totalLeft;
hipHostMalloc((void**) &totalLeft,sizeof(int)*steps);
unsigned int* totalRight;
hipHostMalloc((void**) &totalRight,sizeof(int)*steps);
unsigned int* h_results;
hipHostMalloc((void**) &h_results,sizeof(int)*steps);
unsigned int* h_uniq;
hipHostMalloc((void**) &h_uniq,sizeof(int)*steps);
/***11 backTrack1aBoard*********************/
//unsigned int* t_aBoard;
//hipHostMalloc((void**) &t_aBoard,sizeof(int)*steps*mark);
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
unsigned int* d_uniq;
hipMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM);
/***11 backTrack1aBoard*********************/
//unsigned int* d_aBoard;
//hipMalloc((void**) &d_aBoard,sizeof(int)*steps*mark);
//Up to N=12 the CPU handles the first 3 rows: while row<mark the
//down,left,right state of those rows is stored into
//totalDown,totalLeft,totalRight, and the remaining rows are then solved
//on the GPU by many threads.
//From N=13 the number of rows handled by the CPU grows by one per N;
//for N=15, for example, the CPU runs up to row=5 and the GPU does the rest
//(with the current settings the GPU processes at most 10 rows).
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]==0: the queen cannot be placed anywhere,
//so go back up one row.
//06GPU: this branch ordering proved faster
if(bitmap[row]==0){ row--; }
else{//there is a free square, so go deeper
/***11 Pruning*********************/
if(row<BOUND1) {
bitmap[row]&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
//06SGPU
/***11 aBoard*********************/
//bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//there is a free square: advance to the next row
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//Place queens one by one on row `mark`, record the down,left,right
//state, and do not descend further; once every feasible square of this
//row has been tried, the GPU runs all the states in parallel.
//totalCond becomes the thread id: each thread receives one
//down,left,right state. The state of row=2 (row=5 for n=15, growing
//with N) is stored into totalDown,totalLeft,totalRight.
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
/***11 aBoard*********************/
//for(int i=0;i<mark;i++){
// t_aBoard[totalCond*mark+i]=aBoard[i];
//}
//Increment the thread count
totalCond++;
//Once the batch is full, run the GPU here; `steps` controls how many
//GPU threads run concurrently.
//For small n, totalCond never exceeds steps, but it does as n grows.
//Only the totalCond==steps case enters this block.
if(totalCond==steps){
//Add the COUNTs when matched==true: counting happens inside the GPU,
//so matched is true once a previous kernel batch has run.
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/***11 aBoard*********************/
//hipMemcpy(d_aBoard,t_aBoard,
// sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
/***11 BOUND1*********************/
//cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row);
hipLaunchKernelGGL(( cuda_kernel_b1), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,row,BOUND1);
//`steps` threads are launched, but only totalCond of them compute;
//the rest spin idle.
//Counting happens inside the GPU, so matched becomes true after the launch.
matched=true;
//After running the GPU on the totalCond==steps path, restart the thread
//counter from 0 (so the GPU can be launched again in batches of `steps`).
totalCond=0;
}
//After storing into totalDown,totalLeft,totalRight, go back up one row.
//Repeating this places a queen on every feasible square of the row and
//stores each state into totalDown,totalLeft,totalRight.
row--;
}
}else{
//No free square: go back up one row. Until row reaches mark the CPU
//just runs an ordinary N-Queens backtrack.
row--;
}
}
}
//Add the COUNTs when matched==true: counting happens inside the GPU,
//so matched is true once a previous kernel batch has run.
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/***11 aBoard*********************/
//hipMemcpy(d_aBoard,t_aBoard,
// sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
//size-mark is the number of rows the GPU executes; totalCond is the
//thread count. `steps` threads are launched, but only totalCond of them
//compute; the rest spin idle.
/***11 BOUND1*********************/
//cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark);
hipLaunchKernelGGL(( cuda_kernel_b1), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,mark,BOUND1);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
//
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
hipFree(d_uniq);
/***11 aBoard**/
//hipFree(d_aBoard);
hipHostFree(totalDown);
hipHostFree(totalLeft);
hipHostFree(totalRight);
hipHostFree(h_results);
hipHostFree(h_uniq);
/***11 aBoard**/
//hipHostFree(t_aBoard);
return total;
}
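/*
Hypothetical sketch: launching one batch of cuda_kernel_b1 the way
backTrack1G() above does, assuming the device buffers already hold totalCond
packed states. Threads with idx>=totalCond idle inside the kernel.
*/
static void launch_b1_batch_sketch(int size,int mark,
unsigned int* downCuda,unsigned int* leftCuda,unsigned int* rightCuda,
unsigned int* resultsCuda,unsigned int* d_uniq,
int totalCond,int row,int BOUND1,int steps)
{
//steps/THREAD_NUM blocks of THREAD_NUM threads per batch
hipLaunchKernelGGL(cuda_kernel_b1,dim3(steps/THREAD_NUM),dim3(THREAD_NUM),0,0,
size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,row,BOUND1);
}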
//
//GPU
void NQueenG(register int size,register int steps)
{
if(size<=0||size>32){return;}
/**
Local conventions:
variables passed as parameters are declared register;
int values are unsigned;
total: minimizes accesses to the global TOTAL;
sizeE: caches size-1 so it is not recomputed.
*/
unsigned int total=0;
unsigned int sizeE=size-1;
register unsigned int aBoard[MAX];
register int bit=0;
register int mask=((1<<size)-1);
int col=0;//1 0
aBoard[0]=bit=(1<<col);
register int left=bit<<1,down=bit,right=bit>>1;
/**
The second-row queen runs from the third column from the right
up to the second column from the left.
*/
for(register int BOUND1=2;BOUND1<sizeE;BOUND1++){
aBoard[1]=bit=(1<<BOUND1);
total+=backTrack1G(size,mask,2,
(left|bit)<<1,(down|bit),(right|bit)>>1,
steps,BOUND1);
}
register int LASTMASK,SIDEMASK;
register int TOPBIT=1<<(sizeE);
SIDEMASK=LASTMASK=(TOPBIT|1);
register int ENDBIT=(TOPBIT>>1);
/**
The first-row queen starts at the second column from the right:
for even N the loop covers half of the columns (n=8: 1,2,3),
for odd N one more (n=9: 1,2,3,4).
*/
for(register int BOUND1=1,BOUND2=sizeE-1;BOUND1<BOUND2;BOUND1++,BOUND2--){
aBoard[0]=bit=(1<<BOUND1);
total+=backTrack2G(size,mask,1,
bit<<1,bit,bit>>1,
steps,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT,aBoard);
LASTMASK|=LASTMASK>>1|LASTMASK<<1;
ENDBIT>>=1;
}
/**
*/
TOTAL=total;
}
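/*
Illustrative usage sketch (hypothetical): driving NQueenG() for a single board
size, mirroring what main() below does for the GPU path.
*/
static void solve_one_size_sketch(int n,int steps)
{
TOTAL=0; UNIQUE=0;   //global counters filled by the GPU path
NQueenG(n,steps);    //steps = GPU threads per batch (24576 in main())
printf("%2d:%13ld%16ld\n",n,TOTAL,UNIQUE);
}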
/** CUDA **/
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
hipSetDevice(i);
return true;
}
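/*
Hypothetical sketch: minimal error-checked use of InitCUDA() together with a
hipGetLastError() check. Not part of the original control flow.
*/
static int cuda_ready_sketch(void)
{
if(!InitCUDA()){ return 0; }
hipError_t e=hipGetLastError();
if(e!=hipSuccess){ fprintf(stderr,"%s\n",hipGetErrorString(e)); return 0; }
return 1;
}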
//
void symmetryOps(local *l)
{
int own,ptn,you,bit;
//Check the 90-degree rotation
if(l->aBoard[l->BOUND2]==1){ own=1; ptn=2;
while(own<=G.sizeE){ bit=1; you=G.sizeE;
while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you--; }
if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; }
own++; ptn<<=1;
}
/** If identical after a 90-degree rotation, it is also identical after 180/270 degrees */
if(own>G.sizeE){ l->COUNT2[l->BOUND1]++; return; }
}
//Check the 180-degree rotation
if(l->aBoard[G.sizeE]==l->ENDBIT){ own=1; you=G.sizeE-1;
while(own<=G.sizeE){ bit=1; ptn=l->TOPBIT;
while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; ptn>>=1; }
if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; }
own++; you--;
}
/** Even if the 90-degree rotation differs, the 180-degree rotation may still match */
if(own>G.sizeE){ l->COUNT4[l->BOUND1]++; return; }
}
//Check the 270-degree rotation
if(l->aBoard[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1;
while(own<=G.sizeE){ bit=1; you=0;
while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you++; }
if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; }
own++; ptn>>=1;
}
}
l->COUNT8[l->BOUND1]++;
}
//
//CPU backTrack2//
void backTrack2_NR(int row,int h_left,int h_down,int h_right,local *l)
{
unsigned int left[G.size];
unsigned int down[G.size];
unsigned int right[G.size];
unsigned int bitmap[G.size];
left[row]=h_left;
down[row]=h_down;
right[row]=h_right;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
int mark=row;
//Never back up past the rows fixed by the caller
while(row>=mark){//row=1 row>=1, row=2 row>=2
if(bitmap[row]==0){
--row;
}else{
//Upper-side pruning
if(row<l->BOUND1){
bitmap[row]&=~l->SIDEMASK;
//Lower-side pruning
}else if(row==l->BOUND2) {
if((down[row]&l->SIDEMASK)==0){ row--; }
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; }
}
int save_bitmap=bitmap[row];
bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&l->mask)!=0){
if(row==G.sizeE){
if((save_bitmap&l->LASTMASK)==0){
symmetryOps(l);
--row;
}
}else{
int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
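/*
Illustrative sketch of the two bit tricks every backtracker in this file
relies on: mask&~(left|down|right) marks the free columns of a row, and
-bm&bm isolates the lowest set bit. Function names are hypothetical.
*/
static inline int free_columns_sketch(int mask,int left,int down,int right)
{
return mask&~(left|down|right);  //1-bits are the columns still available
}
static inline int lowest_free_bit_sketch(int bitmap)
{
return -bitmap&bitmap;           //two's complement isolates the lowest 1-bit
}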
//
// CPU backTrack2
void backTrack2D_NR(int row,int left,int down,int right,local *l)
{
int bitmap,bit;
int b[100], *p=b;
int odd=G.size&1; //odd:1 even:0
for(int i=0;i<(1+odd);++i){
bitmap=0;
if(0==i){
int half=G.size>>1; // size/2
bitmap=(1<<half)-1;
}else{
bitmap=1<<(G.size>>1);
// down[1]=bitmap;
// right[1]=(bitmap>>1);
// left[1]=(bitmap<<1);
// pnStack=aStack+1;
// *pnStack++=0;
}
mais1:bitmap=l->mask&~(left|down|right);
//
if(row==G.sizeE){
if(bitmap){
//
if((bitmap&l->LASTMASK)==0){
l->aBoard[row]=bitmap;
symmetryOps(l);
}
}
}else{
//Upper-side pruning
if(row<l->BOUND1){
bitmap&=~l->SIDEMASK;
//Lower-side pruning
}else if(row==l->BOUND2){
if(!(down&l->SIDEMASK))
goto volta;
if((down&l->SIDEMASK)!=l->SIDEMASK)
bitmap&=l->SIDEMASK;
}
if(bitmap){
outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap;
if(bitmap){
*p++=left;
*p++=down;
*p++=right;
}
*p++=bitmap;
row++;
left=(left|bit)<<1;
down=down|bit;
right=(right|bit)>>1;
goto mais1;
//Backtrack2(y+1, (left | bit)<<1, down | bit, (right | bit)>>1);
volta:if(p<=b)
return;
row--;
bitmap=*--p;
if(bitmap){
right=*--p;
down=*--p;
left=*--p;
goto outro;
}else{
goto volta;
}
}
}
goto volta;
}
}
//CPU backTrack
void backTrack1_NR(int row,int h_left,int h_down,int h_right,local *l)
{
unsigned int left[G.size];
unsigned int down[G.size];
unsigned int right[G.size];
unsigned int bitmap[G.size];
left[row]=h_left;
down[row]=h_down;
right[row]=h_right;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
int mark=row;
//Never back up past the rows fixed by the caller
while(row>=mark){//row=1 row>=1, row=2 row>=2
if(bitmap[row]==0){
--row;
}else{
if(row<l->BOUND1) {
bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2)
}
bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&l->mask)!=0){
if(row==G.sizeE){
l->COUNT8[l->BOUND1]++;
--row;
}else{
int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
// CPU backTrack
void backTrack1D_NR(int row,int left,int down,int right,local *l)
{
int bitmap,bit;
int b[100], *p=b;
int odd=G.size&1; //odd:1 even:0
for(int i=0;i<(1+odd);++i){
bitmap=0;
if(0==i){
int half=G.size>>1; // size/2
bitmap=(1<<half)-1;
}else{
bitmap=1<<(G.size>>1);
// down[1]=bitmap;
// right[1]=(bitmap>>1);
// left[1]=(bitmap<<1);
// pnStack=aStack+1;
// *pnStack++=0;
}
b1mais1:bitmap=l->mask&~(left|down|right);
//
if(row==G.sizeE){
if(bitmap){
// l->aBoard[row]=bitmap;
l->COUNT8[l->BOUND1]++;
}
}else{
//Pruning: for mirror images only the main-diagonal mirror needs checking.
//Treat row 2 and column 2 as numbers and require row2 < column2.
if(row<l->BOUND1) {
bitmap&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
if(bitmap){
b1outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap;
if(bitmap){
*p++=left;
*p++=down;
*p++=right;
}
*p++=bitmap;
row++;
left=(left|bit)<<1;
down=down|bit;
right=(right|bit)>>1;
goto b1mais1;
//Backtrack1(y+1, (left | bit)<<1, down | bit, (right | bit)>>1);
b1volta:if(p<=b)
return;
row--;
bitmap=*--p;
if(bitmap){
right=*--p;
down=*--p;
left=*--p;
goto b1outro;
}else{
goto b1volta;
}
}
}
goto b1volta;
}
}
//
//CPU backTrack
void backTrack2(int row,int left,int down,int right,local *l)
{
int bitmap=0;
int bit=0;
bitmap=(l->mask&~(left|down|right));
if(row==G.sizeE){
if(bitmap){
//
if((bitmap&l->LASTMASK)==0){
l->aBoard[row]=(-bitmap&bitmap);
symmetryOps(l);
}
}
}else{
//Upper-side pruning
if(row<l->BOUND1){
bitmap&=~l->SIDEMASK;
//Lower-side pruning
}else if(row==l->BOUND2) {
if((down&l->SIDEMASK)==0){ return; }
if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; }
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack2(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l);
}
}
}
// CPU backTrack
void backTrack2D(int row,int left,int down,int right,local *l)
{
int bit;
int bitmap=l->mask&~(left|down|right);
if(row==G.sizeE){ //last row
if(bitmap){
if((bitmap&l->LASTMASK)==0){ //last-row pruning with LASTMASK
l->aBoard[row]=bitmap;
symmetryOps(l);
}
}
}else{
if(row<l->BOUND1){ //upper-side pruning
bitmap&=~l->SIDEMASK;
}else if(row==l->BOUND2) { //lower-side pruning
if((down&l->SIDEMASK)==0){ return; }
if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; }
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack2D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
//
//CPU backTrack
void backTrack1(int row,int left,int down,int right,local *l)
{
int bitmap=0;
int bit=0;
bitmap=(l->mask&~(left|down|right));
if(row==G.sizeE){
if(bitmap){
l->COUNT8[l->BOUND1]++;
}
}else{
if(row<l->BOUND1) {
bitmap&=~2; // bm|=2; bm^=2; (bm&=~2)
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack1(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l);
}
}
}
// CPU backTrack
void backTrack1D(int row,int left,int down,int right,local *l)
{
int bit;
int bitmap=l->mask&~(left|down|right);
//
if(row==G.sizeE) {
if(bitmap){
/* l->aBoard[row]=bitmap; */
l->COUNT8[l->BOUND1]++;
}
}else{
//Pruning: for mirror images only the main-diagonal mirror needs checking.
//Treat row 2 and column 2 as numbers and require row2 < column2.
if(row<l->BOUND1) {
bitmap&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack1D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
//
void *run(void *args)
{
/**
//
typedef struct {
int size;
int sizeE;
long lTOTAL,lUNIQUE;
}GCLASS, *GClass;
GCLASS G;
*/
local *l=(local *)args;
/**
Search with the top-row queen in the corner (backTrack1 family).
*/
int bit=0;
int col=0;
if(l->BOUND1>1 && l->BOUND1<G.sizeE) {
l->aBoard[0]=bit=(1<<col);
int left=bit<<1;int down=bit;int right=bit>>1;
if(l->BOUND1<G.sizeE) {
col=l->BOUND1;//
l->aBoard[1]=bit=(1<<col);
if(NR==1){//
backTrack1_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU
//backTrack1D_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);
}else{//
backTrack1(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU
//backTrack1D(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//
}
}
}
l->TOPBIT=1<<(G.sizeE);
l->ENDBIT=(l->TOPBIT>>l->BOUND1);
l->SIDEMASK=l->LASTMASK=(l->TOPBIT|1);
/**
Search with the top-row queen away from the corner (backTrack2 family).
*/
if(l->BOUND1>0&&l->BOUND2<G.sizeE&&l->BOUND1<l->BOUND2){
for(int i=1; i<l->BOUND1; i++){
l->LASTMASK=l->LASTMASK|l->LASTMASK>>1|l->LASTMASK<<1;
}
if(l->BOUND1<l->BOUND2){
int col=l->BOUND1;
l->aBoard[0]=bit=(1<<col);
if(NR==1){//
backTrack2_NR(1,bit<<1,bit,bit>>1,l); //GPU
//backTrack2D_NR(1,bit<<1,bit,bit>>1,l);//
}else{//
backTrack2(1,bit<<1,bit,bit>>1,l); //GPU
//backTrack2D(1,bit<<1,bit,bit>>1,l);//
}
}
l->ENDBIT>>=G.size;
}
return 0;//a pthread entry point must return a value
}
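/*
Hypothetical single-threaded driver sketch: prepares one `local` work item the
same way NQueenThread() below does and calls run() directly, bypassing
pthread. G.size and G.sizeE must already be set.
*/
static void run_one_bound1_sketch(int BOUND1,int BOUND2)
{
local l;
l.mask=(1<<G.size)-1;
l.BOUND1=BOUND1; l.BOUND2=BOUND2;
for(int j=0;j<G.size;j++){ l.aBoard[j]=j; }
l.COUNT2[BOUND1]=l.COUNT4[BOUND1]=l.COUNT8[BOUND1]=0;
run((void*)&l);
}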
//pthread
void *NQueenThread()
{
/**
//
typedef struct{
int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
int mask;
int aBoard[MAX];
long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX];
}local ;
*/
local l[MAX];// local
/**
pthread handles, one per BOUND1.
*/
pthread_t pt[G.size];
/**
*/
for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){
/**
Initialize the mask, bounds, aBoard and counters for this BOUND1.
*/
l[BOUND1].mask=(1<<G.size)-1;
l[BOUND1].BOUND1=BOUND1;l[BOUND1].BOUND2=BOUND2;//set B1 and B2
for(int j=0;j<G.size;j++){ l[BOUND1].aBoard[j]=j; }//initialize aBoard[]
l[BOUND1].COUNT2[BOUND1]=l[BOUND1].COUNT4[BOUND1]=
l[BOUND1].COUNT8[BOUND1]=0;//reset the counters
/**
pthread_create spawns one thread per BOUND1 (N threads in total),
each executing run().
*/
int iFbRet=pthread_create(&pt[BOUND1],NULL,&run,&l[BOUND1]);
if(iFbRet>0){
printf("[mainThread] pthread_create #%d: %d\n", l[BOUND1].BOUND1, iFbRet);
}
}
/**
join() waits for every thread to finish.
*/
for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){
pthread_join(pt[BOUND1],NULL);
}
//Aggregate the per-thread counters
for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){
G.lTOTAL+=l[BOUND1].COUNT2[BOUND1]*2+
l[BOUND1].COUNT4[BOUND1]*4+l[BOUND1].COUNT8[BOUND1]*8;
G.lUNIQUE+=l[BOUND1].COUNT2[BOUND1]+
l[BOUND1].COUNT4[BOUND1]+l[BOUND1].COUNT8[BOUND1];
}
return 0;
}
// CPU/CPUR(pthread)
void NQueen()
{
/**
CUDA build note: if nvcc cannot build or link the pthread code on your system,
rename this file to C13_N-Queen.c and build it with gcc instead
(see the compile note in main()).
*/
pthread_t pth; //thread handle
int iFbRet=0;
//start the worker pthread
iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL);
//error output for debugging
if(iFbRet>0){
printf("[main] pthread_create: %d\n", iFbRet); //error output for debugging
}
pthread_join(pth,NULL); /* wait for the thread to finish */
}
//
int main(int argc,char** argv)
{
/**
$ nvcc -O3 CUDA13_N-Queen.cu && ./a.out (-c|-r|-g|-s)
-c: CPU non-recursive
-r: CPU recursive (CPUR)
-g: GPU
-s: SGPU
*/
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
else{ gpu=true; } //default to GPU
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
printf(" -r: CPUR only\n");
printf(" -c: CPU only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
/**
nvcc may not build the pthread path; to run the CPU/CPUR versions as plain C:
1. make sure the pthread_create call in NQueen() is enabled
//iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL);
2. rename the file from .cu to .c
CUDA13_N-Queen.cu -> CUDA13_N-Queen.c
and compile with
$ gcc -Wall -W -O3 -g -ftrapv -std=c99 -pthread CUDA13_N-Queen.c && ./a.out [-c|-r]
*/
if(cpu){
printf("\n\nCPU non-recursive\n");
printf("pthread parallel version\n");
}else if(cpur){
printf("\n\nCPUR recursive\n");
printf("pthread parallel version\n");
}else if(gpu){
printf("\n\nGPU CUDA\n");
}else if(sgpu){
printf("\n\nSGPU CUDA\n");
}
/**
CPU/CPUR execution: CPU is the non-recursive version, CPUR the recursive one.
*/
if(cpu||cpur){
int min=4; int targetN=17;//smallest and largest N to run
/**
*/
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
/**
size/sizeE/lTOTAL/lUNIQUE
typedef struct {
int size;
int sizeE;
long lTOTAL,lUNIQUE;
}GCLASS, *GClass;
GCLASS G;
*/
G.size=i;G.sizeE=i-1;//size sizeE
G.lTOTAL=G.lUNIQUE=0;//TOTAL UNIQUE
//
gettimeofday(&t0, NULL);//
/**
For CPU/CPUR, select non-recursive or recursive backtracking:
recursive: NR=0
non-recursive: NR=1
*/
if(cpur){ //recursive
//NR=0;NQueenD();
NR=0;NQueen();
}
if(cpu){ //non-recursive
//NR=1;NQueenD();
NR=1;NQueen();
}
//
gettimeofday(&t1, NULL);//
/**
Total Unique dd:hh:mm:ss.ms
15: 2279184 285053 00:00:00:00.33
16: 14772512 1846955 00:00:00:01.59
17: 95815104 11977939 00:00:00:10.92
*/
int ss;int ms;int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
/**
*/
printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n",
i,G.lTOTAL,G.lUNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
/**
GPU/SGPU execution (in this file only the -g GPU path does any work).
*/
if(gpu||sgpu){
/**
Initialize CUDA.
*/
if(!InitCUDA()){return 0;}
int steps=24576;
int min=4;int targetN=21;//smallest and largest N to run
/**
*/
struct timeval t0;
struct timeval t1;
/**
*/
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
/**
*/
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); //start time
if(gpu){//GPU path
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}
gettimeofday(&t1,NULL); //end time
/**
Total Unique dd:hh:mm:ss.ms
15: 2279184 285053 00:00:00:00.33
16: 14772512 1846955 00:00:00:01.59
17: 95815104 11977939 00:00:00:10.92
*/
int ss;int ms;int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n",
i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}//end for
}//end if
return 0;
}
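/*
Hypothetical helper sketch: the dd:hh:mm:ss.ms conversion used twice in main()
above, factored out for clarity. Not called by the original code.
*/
static void print_elapsed_sketch(int n,long total,long unique,
struct timeval t0,struct timeval t1)
{
int dd,ss,ms;
if(t1.tv_usec<t0.tv_usec){
dd=(int)((t1.tv_sec-t0.tv_sec-1)/86400);
ss=(int)((t1.tv_sec-t0.tv_sec-1)%86400);
ms=(int)((1000000+t1.tv_usec-t0.tv_usec+500)/10000);
}else{
dd=(int)((t1.tv_sec-t0.tv_sec)/86400);
ss=(int)((t1.tv_sec-t0.tv_sec)%86400);
ms=(int)((t1.tv_usec-t0.tv_usec+500)/10000);
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n",n,total,unique,dd,hh,mm,ss,ms);
}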
| f2c427909d7336836f357a1fef9e879e845706cf.cu | /**
Algorithms and Data Structures with CUDA
Optimizing the N-Queens problem step by step
一般社団法人 共同通信社 情報技術局 鈴木 維一郎([email protected])
Compile and run
$ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g)
-c: CPU non-recursive
-r: CPU recursive
-g: GPU
$ nvcc -O3 CUDA13_N-Queen.cu && ./a.out -g
13. GPU non-recursive parallel processing (CUDA)
N: Total Unique dd:hh:mm:ss.ms
4: 2 1 00:00:00:00.37
5: 10 2 00:00:00:00.00
6: 4 1 00:00:00:00.00
7: 40 6 00:00:00:00.00
8: 92 12 00:00:00:00.01
9: 352 46 00:00:00:00.01
10: 724 92 00:00:00:00.01
11: 2680 341 00:00:00:00.01
12: 14200 1787 00:00:00:00.02
13: 73712 9233 00:00:00:00.03
14: 365596 45752 00:00:00:00.03
15: 2279184 285053 00:00:00:00.04
16: 14772512 1846955 00:00:00:00.08
17: 95815104 11977939 00:00:00:00.35
18: 666090624 83263591 00:00:00:02.60
19: 4968057848 621012754 00:00:00:22.23
20: 39029188884 4878666808 00:00:03:26.80
21: 314666222712 39333324973 00:00:33:09.52
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <pthread.h>
#define THREAD_NUM 96
#define MAX 27
//Variable declarations
/**
For CPU/CPUR, selects non-recursive or recursive backtracking:
recursive: 0
non-recursive: 1
*/
int NR;
/**
Global variables holding the total and unique solution counts.
*/
long TOTAL=0;
long UNIQUE=0;
//
//Variable declarations
// pthread can pass only a single parameter, so the state is packed into structs
//Global struct
typedef struct {
int size;
int sizeE;
long lTOTAL,lUNIQUE;
}GCLASS, *GClass;
GCLASS G;
//
//Per-thread (local) struct
typedef struct{
int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
int mask;
int aBoard[MAX];
long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX];
}local ;
//
__device__ __host__
int symmetryOps(int si,unsigned int *d_aBoard,int BOUND1,int BOUND2,int TOPBIT,int ENDBIT)
{
int own,ptn,you,bit;
//90度回転
if(d_aBoard[BOUND2]==1){ own=1; ptn=2;
while(own<=si-1){ bit=1; you=si-1;
while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you--; }
if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; }
own++; ptn<<=1;
}
/** 90度回転して同型なら180度/270度回転も同型である */
if(own>si-1){ return 2; }
}
//180度回転
if(d_aBoard[si-1]==ENDBIT){ own=1; you=si-1-1;
while(own<=si-1){ bit=1; ptn=TOPBIT;
while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; ptn>>=1; }
if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; }
own++; you--;
}
/** 90度回転が同型でなくても180度回転が同型である事もある */
if(own>si-1){ return 4; }
}
//270度回転
if(d_aBoard[BOUND1]==TOPBIT){ own=1; ptn=TOPBIT>>1;
while(own<=si-1){ bit=1; you=0;
while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you++; }
if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; }
own++; ptn>>=1;
}
}
return 8;
}
//
__global__
void cuda_kernel_b1(
register int size,
register int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
unsigned int* d_uniq,
register int totalCond,
/**11 backTrack1ではaBoard不要のためコメント*********************/
//unsigned int* t_aBoard,
register int h_row,
/**11 BOUND1追加*********************/
int B1
)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
register unsigned int unique=0;
register int row=0;
register unsigned int bit;
//
//スレッド
//
//ブロック内のスレッドID
register unsigned const int tid=threadIdx.x;
//グリッド内のブロックID
register unsigned const int bid=blockIdx.x;
//全体通してのID
register unsigned const int idx=bid*blockDim.x+tid;
//
//シェアードメモリ
//
//sharedメモリを使う ブロック内スレッドで共有
//10固定なのは現在のmask設定で
//GPUで実行するのは最大10だから
//THREAD_NUMはブロックあたりのスレッド数
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//down,left,rightからbitmapを出す
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
/***11 backTrack1ではaBoard不要 *********************/
//unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//余分なスレッドは動かさない
//GPUはsteps数起動するがtotalCond以上は空回しする
if(idx<totalCond){
//totalDown,totalLeft,totalRightの情報を
//down,left,rightに詰め直す
//CPU で詰め込んだ t_はsteps個あるが
//ブロック内ではブロックあたりのスレッド数に限定
//されるので idxでよい
//
/***11 backTrack1ではaBoard不要*********************/
//for(int i=0;i<h_row;i++){
// c_aBoard[i]=t_aBoard[idx*h_row+i]; //2次元配列だが1次元的に利用
//}
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
if(bitmap_tid_row==0){
row--;
}else{
/**11 枝刈り**********/
if(row+h_row<B1) {
bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等)
}
//クイーンを置く
//置く場所があるかどうか
bitmap[tid][row]
/***11 backTrack1ではaBoard不要のためコメント*********************/
//^=c_aBoard[row+h_row]
//=bit
^=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//最終行?最終行から1個前の行まで
//無事到達したら 加算する
if(row+1==mark){
/**11 backTradk1ではsymmetryOps不要のためコメント*********************/
//int s=symmetryOps(size,c_aBoard);
//if(s!=0){
//print(size); //print()でTOTALを++しない
//ホストに戻す配列にTOTALを入れる
//スレッドが1つの場合は配列は1個
unique++;
total+=8; //対称解除で得られた解数を加算
//}
row--;
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//置く場所がなければ1個上に
row--;
}
}
}
//最後sum[tid]に加算する
sum[tid]=total;
usum[tid]=unique;
}else{
//totalCond未満は空回しするのでtotalは加算しない
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()でブロック内のスレッド間の同期
//全てのスレッドが__syncthreads()に辿り着くのを待つ
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();if(tid==0){
d_results[bid]=sum[0];
d_uniq[bid]=usum[0];
}
}
//
//
/***11 cuda_kernel_b2新設*********************/
__global__
void cuda_kernel_b2(
register int size,
register int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
unsigned int* d_uniq,
register int totalCond,
unsigned int* t_aBoard,
register int h_row,
register int B1,
register int B2,
register int SM,
register int LM,
/***12 symmetryOps 省力化のためTOPBIT,ENDBITを渡す*****/
register int TB,
register int EB
)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
register unsigned int unique=0;
register int row=0;
register unsigned int bit;
//
//スレッド
//
//ブロック内のスレッドID
register unsigned const int tid=threadIdx.x;
//グリッド内のブロックID
register unsigned const int bid=blockIdx.x;
//全体通してのID
register unsigned const int idx=bid*blockDim.x+tid;
//
//シェアードメモリ
//
//sharedメモリを使う ブロック内スレッドで共有
//10固定なのは現在のmask設定で
//GPUで実行するのは最大10だから
//THREAD_NUMはブロックあたりのスレッド数
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//down,left,rightからbitmapを出す
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//余分なスレッドは動かさない
//GPUはsteps数起動するがtotalCond以上は空回しする
if(idx<totalCond){
//totalDown,totalLeft,totalRightの情報を
//down,left,rightに詰め直す
//CPU で詰め込んだ t_はsteps個あるが
//ブロック内ではブロックあたりのスレッド数に限定
//されるので idxでよい
//
for(int i=0;i<h_row;i++){
c_aBoard[i]=t_aBoard[idx*h_row+i]; //2次元配列だが1次元的に利用
}
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
//
//bitmap[tid][row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
if(bitmap_tid_row==0){
row--;
}else{
/**11 枝刈り追加**********/
//【枝刈り】上部サイド枝刈り
if(row+h_row<B1){
//printf("BOUND1_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]);
bitmap_tid_row=bitmap[tid][row]&=~SM;
//【枝刈り】下部サイド枝刈り
}else if(row+h_row==B2) {
//printf("BOUND2_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]);
if((down_tid_row&SM)==0){
row--;
continue;
//printf("BOUND2_row\n");
}
if((down_tid_row&SM)!=SM){
bitmap_tid_row=bitmap[tid][row]&=SM;
//printf("BOUND2_SIDEMASK\n");
}
}
int save_bitmap=bitmap[tid][row];
//クイーンを置く
//置く場所があるかどうか
bitmap[tid][row]
^=c_aBoard[row+h_row]
=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//最終行?最終行から1個前の行まで
//無事到達したら 加算する
if(row+1==mark){
/***11 LASTMASK枝刈り*********************/
if((save_bitmap&LM)==0){
/***12 symmetryOps 省力化のためBOUND1,BOUND2,TOPBIT,ENDBITを渡す*****/
int s=symmetryOps(size,c_aBoard,B1,B2,TB,EB);
if(s!=0){
//print(size); //print()でTOTALを++しない
//ホストに戻す配列にTOTALを入れる
//スレッドが1つの場合は配列は1個
unique++;
total+=s; //対称解除で得られた解数を加算
}
row--;
}
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//置く場所がなければ1個上に
row--;
}
}
}
//最後sum[tid]に加算する
sum[tid]=total;
usum[tid]=unique;
}else{
//totalCond未満は空回しするのでtotalは加算しない
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()でブロック内のスレッド間の同期
//全てのスレッドが__syncthreads()に辿り着くのを待つ
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();if(tid==0){
d_results[bid]=sum[0];
d_uniq[bid]=usum[0];
}
}
//
long backTrack2G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1,int BOUND2,int SIDEMASK,int LASTMASK,int TOPBIT,int ENDBIT,unsigned int* aBoard)
{
//何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く
/***11 size<8の時はmarkが2*********************/
unsigned int mark=size>12?size-10:3;
//unsigned int mark=size>11?size-9:3;
if(size<8){ mark=2; }
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//bitmapを配列で持つことにより
//stackを使わないで1行前に戻れる
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* totalDown;
cudaMallocHost((void**) &totalDown,sizeof(int)*steps);
unsigned int* totalLeft;
cudaMallocHost((void**) &totalLeft,sizeof(int)*steps);
unsigned int* totalRight;
cudaMallocHost((void**) &totalRight,sizeof(int)*steps);
unsigned int* h_results;
cudaMallocHost((void**) &h_results,sizeof(int)*steps);
unsigned int* h_uniq;
cudaMallocHost((void**) &h_uniq,sizeof(int)*steps);
unsigned int* t_aBoard;
cudaMallocHost((void**) &t_aBoard,sizeof(int)*steps*mark);
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
unsigned int* d_uniq;
cudaMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM);
unsigned int* d_aBoard;
cudaMalloc((void**) &d_aBoard,sizeof(int)*steps*mark);
//12行目までは3行目までCPU->row==mark以下で 3行目までの
//down,left,right情報を totalDown,totalLeft,totalRight
//に格納
//する->3行目以降をGPUマルチスレッドで実行し結果を取得
//13行目以降はCPUで実行する行数が1個ずつ増えて行く
//例えばn15だとrow=5までCPUで実行し、
//それ以降はGPU(現在の設定だとGPUでは最大10行実行する
//ようになっている)
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
//06GPU こっちのほうが優秀
if(bitmap[row]==0){ row--; }
else{//おける場所があれば進む
/***11 枝刈り追加*********************/
//【枝刈り】上部サイド枝刈り
if(row<BOUND1){
bitmap[row]&=~SIDEMASK;
//【枝刈り】下部サイド枝刈り
}else if(row==BOUND2) {
if((down[row]&SIDEMASK)==0){ row--; }
if((down[row]&SIDEMASK)!=SIDEMASK){ bitmap[row]&=SIDEMASK; }
}
//06SGPU
bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//置く場所があれば先に進む
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3行目(mark)にクイーンを1個ずつ置いていって、
//down,left,right情報を格納、
//その次の行へは進まない。その行で可能な場所にクイー
//ン置き終わったらGPU並列実行
//totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す
//row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を
//totalDown,totalLeft,totalRightに格納する
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
for(int i=0;i<mark;i++){
t_aBoard[totalCond*mark+i]=aBoard[i];
}
//スレッド数をインクリメントする
totalCond++;
//最大GPU数に達してしまったら一旦ここでGPUを実行する。stepsはGPUの同
//時並行稼働数を制御
//nの数が少ないうちはtotalCondがstepsを超えることはないがnの数が増え
//て行くと超えるようになる。
//ここではtotalCond==stepsの場合だけこの中へ
if(totalCond==steps){
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか
//ら出たらmatched=trueになってる
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(d_aBoard,t_aBoard,
sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
/***12 TOPBIT,ENDBIT追加*********************/
//cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK);
cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT);
//steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われ
//るのはtotalCondの数だけでそれ以外は空回しになる
//GPU内でカウントしているので、GPUから出たらmatched=trueになってる
matched=true;
//totalCond==stepsルートでGPUを実行したらスレッドをまた0から開始す
//る(これによりなんどもsteps数分だけGPUを起動できる)
totalCond=0;
}
//totalDown,totalLeft,totalRightに情報を格納したら1行上に上がる
//これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて
//totalDown,totalLeft,totalRightに情報を格納する
row--;
}
}else{
//置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に
//nqueenをやる
row--;
}
}
}
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら
//matched=trueになってる
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(d_aBoard,t_aBoard,
sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
//size-mark は何行GPUを実行するか totalCondはスレッド数
//steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われるのは
//totalCondの数だけでそれ以外は空回しになる
/***12 TOPBIT,ENDBIT追加*********************/
//cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK);
cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
//
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
cudaFree(d_uniq);
cudaFree(d_aBoard);
cudaFreeHost(totalDown);
cudaFreeHost(totalLeft);
cudaFreeHost(totalRight);
cudaFreeHost(h_results);
cudaFreeHost(h_uniq);
cudaFreeHost(t_aBoard);
return total;
}
//
long backTrack1G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1)
{
//何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く
/***08 クイーンを2行目まで固定で置くためmarkが3以上必要*********************/
const unsigned int mark=size>12?size-10:3;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//bitmapを配列で持つことにより
//stackを使わないで1行前に戻れる
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* totalDown;
cudaMallocHost((void**) &totalDown,sizeof(int)*steps);
unsigned int* totalLeft;
cudaMallocHost((void**) &totalLeft,sizeof(int)*steps);
unsigned int* totalRight;
cudaMallocHost((void**) &totalRight,sizeof(int)*steps);
unsigned int* h_results;
cudaMallocHost((void**) &h_results,sizeof(int)*steps);
unsigned int* h_uniq;
cudaMallocHost((void**) &h_uniq,sizeof(int)*steps);
/***11 backTrack1ではaBoard不要のためコメント*********************/
//unsigned int* t_aBoard;
//cudaMallocHost((void**) &t_aBoard,sizeof(int)*steps*mark);
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
unsigned int* d_uniq;
cudaMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM);
/***11 backTrack1ではaBoard不要のためコメント*********************/
//unsigned int* d_aBoard;
//cudaMalloc((void**) &d_aBoard,sizeof(int)*steps*mark);
//12行目までは3行目までCPU->row==mark以下で 3行目までの
//down,left,right情報を totalDown,totalLeft,totalRight
//に格納
//する->3行目以降をGPUマルチスレッドで実行し結果を取得
//13行目以降はCPUで実行する行数が1個ずつ増えて行く
//例えばn15だとrow=5までCPUで実行し、
//それ以降はGPU(現在の設定だとGPUでは最大10行実行する
//ようになっている)
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
//06GPU こっちのほうが優秀
if(bitmap[row]==0){ row--; }
else{//おける場所があれば進む
/***11 枝刈り*********************/
if(row<BOUND1) {
bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等)
}
//06SGPU
/***11 aBoard不要*********************/
//bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//置く場所があれば先に進む
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3行目(mark)にクイーンを1個ずつ置いていって、
//down,left,right情報を格納、
//その次の行へは進まない。その行で可能な場所にクイー
//ン置き終わったらGPU並列実行
//totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す
//row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を
//totalDown,totalLeft,totalRightに格納する
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
/***11 aBoardコメント*********************/
//for(int i=0;i<mark;i++){
// t_aBoard[totalCond*mark+i]=aBoard[i];
//}
//スレッド数をインクリメントする
totalCond++;
//最大GPU数に達してしまったら一旦ここでGPUを実行する。stepsはGPUの同
//時並行稼働数を制御
//nの数が少ないうちはtotalCondがstepsを超えることはないがnの数が増え
//て行くと超えるようになる。
//ここではtotalCond==stepsの場合だけこの中へ
if(totalCond==steps){
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか
//ら出たらmatched=trueになってる
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/***11 aBoard不要のためコメント*********************/
//cudaMemcpy(d_aBoard,t_aBoard,
// sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
/***11 BOUND1追加*********************/
//cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row);
cuda_kernel_b1<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,row,BOUND1);
//steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われ
//るのはtotalCondの数だけでそれ以外は空回しになる
//GPU内でカウントしているので、GPUから出たらmatched=trueになってる
matched=true;
//totalCond==stepsルートでGPUを実行したらスレッドをまた0から開始す
//る(これによりなんどもsteps数分だけGPUを起動できる)
totalCond=0;
}
//totalDown,totalLeft,totalRightに情報を格納したら1行上に上がる
//これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて
//totalDown,totalLeft,totalRightに情報を格納する
row--;
}
}else{
//置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に
//nqueenをやる
row--;
}
}
}
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら
//matched=trueになってる
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/***11 aBoard不要のためコメント*********************/
//cudaMemcpy(d_aBoard,t_aBoard,
// sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
//size-mark は何行GPUを実行するか totalCondはスレッド数
//steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われるのは
//totalCondの数だけでそれ以外は空回しになる
/***11 BOUND1追加*********************/
//cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
// >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark);
cuda_kernel_b1<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,mark,BOUND1);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(h_uniq,d_uniq,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
UNIQUE+=h_uniq[col];
}
//
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
cudaFree(d_uniq);
/***11 aBoardコメント**/
//cudaFree(d_aBoard);
cudaFreeHost(totalDown);
cudaFreeHost(totalLeft);
cudaFreeHost(totalRight);
cudaFreeHost(h_results);
cudaFreeHost(h_uniq);
/***11 aBoardコメント**/
//cudaFreeHost(t_aBoard);
return total;
}
//
//GPU
void NQueenG(register int size,register int steps)
{
if(size<=0||size>32){return;}
/**
パラメータは渡す変数はregisterとする
int型は unsigned とする
total: グローバル変数TOTALへのアクセスを極小化する
sizeE:size-1といった計算を変数に格納しフラット化する
*/
unsigned int total=0;
unsigned int sizeE=size-1;
register unsigned int aBoard[MAX];
register int bit=0;
register int mask=((1<<size)-1);
int col=0;//1行め右端 0
aBoard[0]=bit=(1<<col);
register int left=bit<<1,down=bit,right=bit>>1;
/**
2行目は右から3列目から左端から2列目まで
*/
for(register int BOUND1=2;BOUND1<sizeE;BOUND1++){
aBoard[1]=bit=(1<<BOUND1);
total+=backTrack1G(size,mask,2,
(left|bit)<<1,(down|bit),(right|bit)>>1,
steps,BOUND1);
}
register int LASTMASK,SIDEMASK;
register int TOPBIT=1<<(sizeE);
SIDEMASK=LASTMASK=(TOPBIT|1);
register int ENDBIT=(TOPBIT>>1);
/**
1行目右から2列目から
偶数個は1/2 n=8 なら 1,2,3 奇数個は1/2+1 n=9 なら 1,2,3,4
*/
for(register int BOUND1=1,BOUND2=sizeE-1;BOUND1<BOUND2;BOUND1++,BOUND2--){
aBoard[0]=bit=(1<<BOUND1);
total+=backTrack2G(size,mask,1,
bit<<1,bit,bit>>1,
steps,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT,aBoard);
LASTMASK|=LASTMASK>>1|LASTMASK<<1;
ENDBIT>>=1;
}
/**
グローバル変数へのアクセスを極小化する
*/
TOTAL=total;
}
/** CUDA 初期化 **/
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
cudaSetDevice(i);
return true;
}
//
void symmetryOps(local *l)
{
int own,ptn,you,bit;
//90度回転
if(l->aBoard[l->BOUND2]==1){ own=1; ptn=2;
while(own<=G.sizeE){ bit=1; you=G.sizeE;
while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you--; }
if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; }
own++; ptn<<=1;
}
/** 90度回転して同型なら180度/270度回転も同型である */
if(own>G.sizeE){ l->COUNT2[l->BOUND1]++; return; }
}
//180度回転
if(l->aBoard[G.sizeE]==l->ENDBIT){ own=1; you=G.sizeE-1;
while(own<=G.sizeE){ bit=1; ptn=l->TOPBIT;
while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; ptn>>=1; }
if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; }
own++; you--;
}
/** 90度回転が同型でなくても180度回転が同型である事もある */
if(own>G.sizeE){ l->COUNT4[l->BOUND1]++; return; }
}
//270度回転
if(l->aBoard[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1;
while(own<=G.sizeE){ bit=1; you=0;
while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you++; }
if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; }
own++; ptn>>=1;
}
}
l->COUNT8[l->BOUND1]++;
}
//
//CPU 非再帰版 backTrack2//新しく記述
void backTrack2_NR(int row,int h_left,int h_down,int h_right,local *l)
{
unsigned int left[G.size];
unsigned int down[G.size];
unsigned int right[G.size];
unsigned int bitmap[G.size];
left[row]=h_left;
down[row]=h_down;
right[row]=h_right;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
int mark=row;
//固定していれた行より上はいかない
while(row>=mark){//row=1 row>=1, row=2 row>=2
if(bitmap[row]==0){
--row;
}else{
//【枝刈り】上部サイド枝刈り
if(row<l->BOUND1){
bitmap[row]&=~l->SIDEMASK;
//【枝刈り】下部サイド枝刈り
}else if(row==l->BOUND2) {
if((down[row]&l->SIDEMASK)==0){ row--; }
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; }
}
int save_bitmap=bitmap[row];
bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&l->mask)!=0){
if(row==G.sizeE){
if((save_bitmap&l->LASTMASK)==0){
symmetryOps(l);
--row;
}
}else{
int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
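/**
  Note: the per-row arrays left[]/down[]/right[]/bitmap[] act as an explicit
  stack, so this non-recursive variant mirrors the recursive backTrack2 below
  while never climbing above the caller-fixed starting row (mark).
*/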
//
//Standard CPU non-recursive backTrack2
void backTrack2D_NR(int row,int left,int down,int right,local *l)
{
int bitmap,bit;
int b[100], *p=b;
  int odd=G.size&1; //odd:1 even:0
for(int i=0;i<(1+odd);++i){
bitmap=0;
if(0==i){
int half=G.size>>1; // size/2
bitmap=(1<<half)-1;
}else{
bitmap=1<<(G.size>>1);
// down[1]=bitmap;
// right[1]=(bitmap>>1);
// left[1]=(bitmap<<1);
// pnStack=aStack+1;
// *pnStack++=0;
}
mais1:bitmap=l->mask&~(left|down|right);
    // [Pruning]
if(row==G.sizeE){
if(bitmap){
        //[Pruning] bottom-row pruning
if((bitmap&l->LASTMASK)==0){
l->aBoard[row]=bitmap;
symmetryOps(l);
}
}
}else{
      //[Pruning] upper-side pruning
if(row<l->BOUND1){
bitmap&=~l->SIDEMASK;
      //[Pruning] lower-side pruning
}else if(row==l->BOUND2){
if(!(down&l->SIDEMASK))
goto volta;
if((down&l->SIDEMASK)!=l->SIDEMASK)
bitmap&=l->SIDEMASK;
}
if(bitmap){
outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap;
if(bitmap){
*p++=left;
*p++=down;
*p++=right;
}
*p++=bitmap;
row++;
left=(left|bit)<<1;
down=down|bit;
right=(right|bit)>>1;
goto mais1;
//Backtrack2(y+1, (left | bit)<<1, down | bit, (right | bit)>>1);
volta:if(p<=b)
return;
row--;
bitmap=*--p;
if(bitmap){
right=*--p;
down=*--p;
left=*--p;
goto outro;
}else{
goto volta;
}
}
}
goto volta;
}
}
//CPU non-recursive backTrack
void backTrack1_NR(int row,int h_left,int h_down,int h_right,local *l)
{
unsigned int left[G.size];
unsigned int down[G.size];
unsigned int right[G.size];
unsigned int bitmap[G.size];
left[row]=h_left;
down[row]=h_down;
right[row]=h_right;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
int mark=row;
  //never go above the fixed starting row
while(row>=mark){//row=1 row>=1, row=2 row>=2
if(bitmap[row]==0){
--row;
}else{
if(row<l->BOUND1) {
        bitmap[row]&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&l->mask)!=0){
if(row==G.sizeE){
l->COUNT8[l->BOUND1]++;
--row;
}else{
int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=l->mask&~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
//Standard CPU non-recursive backTrack
void backTrack1D_NR(int row,int left,int down,int right,local *l)
{
int bitmap,bit;
int b[100], *p=b;
  int odd=G.size&1; //odd:1 even:0
for(int i=0;i<(1+odd);++i){
bitmap=0;
if(0==i){
int half=G.size>>1; // size/2
bitmap=(1<<half)-1;
}else{
bitmap=1<<(G.size>>1);
// down[1]=bitmap;
// right[1]=(bitmap>>1);
// left[1]=(bitmap<<1);
// pnStack=aStack+1;
// *pnStack++=0;
}
b1mais1:bitmap=l->mask&~(left|down|right);
    //[Pruning] skip the rotational-symmetry check when the queen is in the corner of row 1
if(row==G.sizeE){
if(bitmap){
// l->aBoard[row]=bitmap;
l->COUNT8[l->BOUND1]++;
}
}else{
      //[Pruning] for mirror images only the main-diagonal mirror needs checking:
      // treat row 2 and column 2 as numbers and require row 2 < column 2
if(row<l->BOUND1) {
        bitmap&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
if(bitmap){
b1outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap;
if(bitmap){
*p++=left;
*p++=down;
*p++=right;
}
*p++=bitmap;
row++;
left=(left|bit)<<1;
down=down|bit;
right=(right|bit)>>1;
goto b1mais1;
//Backtrack1(y+1, (left | bit)<<1, down | bit, (right | bit)>>1);
b1volta:if(p<=b)
return;
row--;
bitmap=*--p;
if(bitmap){
right=*--p;
down=*--p;
left=*--p;
goto b1outro;
}else{
goto b1volta;
}
}
}
goto b1volta;
}
}
//
//CPU recursive backTrack
void backTrack2(int row,int left,int down,int right,local *l)
{
int bitmap=0;
int bit=0;
bitmap=(l->mask&~(left|down|right));
if(row==G.sizeE){
if(bitmap){
      //[Pruning] bottom-row pruning
if((bitmap&l->LASTMASK)==0){
l->aBoard[row]=(-bitmap&bitmap);
symmetryOps(l);
}
}
}else{
    //[Pruning] upper-side pruning
if(row<l->BOUND1){
bitmap&=~l->SIDEMASK;
    //[Pruning] lower-side pruning
}else if(row==l->BOUND2) {
if((down&l->SIDEMASK)==0){ return; }
if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; }
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack2(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l);
}
}
}
//Standard CPU recursive backTrack
void backTrack2D(int row,int left,int down,int right,local *l)
{
int bit;
int bitmap=l->mask&~(left|down|right);
  if(row==G.sizeE){ // [Pruning]
if(bitmap){
      if((bitmap&l->LASTMASK)==0){ //[Pruning] bottom-row pruning
l->aBoard[row]=bitmap;
symmetryOps(l);
}
}
}else{
    if(row<l->BOUND1){ //[Pruning] upper-side pruning
bitmap&=~l->SIDEMASK;
    }else if(row==l->BOUND2) { //[Pruning] lower-side pruning
if((down&l->SIDEMASK)==0){ return; }
if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; }
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack2D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
//
//CPU recursive backTrack
void backTrack1(int row,int left,int down,int right,local *l)
{
int bitmap=0;
int bit=0;
bitmap=(l->mask&~(left|down|right));
if(row==G.sizeE){
if(bitmap){
l->COUNT8[l->BOUND1]++;
}
}else{
if(row<l->BOUND1) {
      bitmap&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack1(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l);
}
}
}
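/**
  Note on the bit tricks used throughout these backtrackers: (-bitmap & bitmap)
  isolates the lowest set bit (e.g. bitmap=01100b yields 00100b), and XOR-ing
  that bit back into bitmap removes the candidate column before the next pass.
*/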
//Standard CPU recursive backTrack
void backTrack1D(int row,int left,int down,int right,local *l)
{
int bit;
int bitmap=l->mask&~(left|down|right);
  //[Pruning] skip the rotational-symmetry check when the queen is in the corner of row 1
if(row==G.sizeE) {
if(bitmap){
/* l->aBoard[row]=bitmap; */
l->COUNT8[l->BOUND1]++;
}
}else{
    //[Pruning] for mirror images only the main-diagonal mirror needs checking:
    // treat row 2 and column 2 as numbers and require row 2 < column 2
if(row<l->BOUND1) {
      bitmap&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
while(bitmap){
bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap);
backTrack1D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
//Execution routine for the child threads
void *run(void *args)
{
/**
  //Global struct
typedef struct {
int size;
int sizeE;
long lTOTAL,lUNIQUE;
}GCLASS, *GClass;
GCLASS G;
*/
local *l=(local *)args;
  /**
    Search for the case where the top-row queen is in the corner.
  */
int bit=0;
int col=0;
if(l->BOUND1>1 && l->BOUND1<G.sizeE) {
l->aBoard[0]=bit=(1<<col);
int left=bit<<1;int down=bit;int right=bit>>1;
if(l->BOUND1<G.sizeE) {
      col=l->BOUND1;// place the queen in the corner
l->aBoard[1]=bit=(1<<col);
      if(NR==1){//non-recursive from row 2
        backTrack1_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU-adapted version
        //backTrack1D_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);
      }else{//recursive from row 2
        backTrack1(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU-adapted version
        //backTrack1D(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//standard version
}
}
}
l->TOPBIT=1<<(G.sizeE);
l->ENDBIT=(l->TOPBIT>>l->BOUND1);
l->SIDEMASK=l->LASTMASK=(l->TOPBIT|1);
  /**
    Search for the case where the top-row queen is NOT in the corner.
    To remove the left-right mirror images of unique solutions in advance,
    it suffices to place queens only in the left half.
  */
if(l->BOUND1>0&&l->BOUND2<G.sizeE&&l->BOUND1<l->BOUND2){
for(int i=1; i<l->BOUND1; i++){
l->LASTMASK=l->LASTMASK|l->LASTMASK>>1|l->LASTMASK<<1;
}
if(l->BOUND1<l->BOUND2){
int col=l->BOUND1;
l->aBoard[0]=bit=(1<<col);
      if(NR==1){//non-recursive from row 2
        backTrack2_NR(1,bit<<1,bit,bit>>1,l); //GPU-adapted version
        //backTrack2D_NR(1,bit<<1,bit,bit>>1,l);//standard version
      }else{//recursive from row 2
        backTrack2(1,bit<<1,bit,bit>>1,l); //GPU-adapted version
        //backTrack2D(1,bit<<1,bit,bit>>1,l);//standard version
}
}
l->ENDBIT>>=G.size;
}
  return 0;//return 0; is required for *run()
}
//Thread creation with pthread
void *NQueenThread()
{
/**
//ローカル構造体
typedef struct{
int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
int mask;
int aBoard[MAX];
long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX];
}local ;
*/
  local l[MAX];//array of struct local
  /**
    pthread child threads
  */
pthread_t pt[G.size];
  /**
    Initialization and child-thread creation
  */
for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){
    /**
      Initialize aBoard and the counters
    */
l[BOUND1].mask=(1<<G.size)-1;
    l[BOUND1].BOUND1=BOUND1;l[BOUND1].BOUND2=BOUND2;//initialize B1 and B2
    for(int j=0;j<G.size;j++){ l[BOUND1].aBoard[j]=j; }// initialize aB[]
    l[BOUND1].COUNT2[BOUND1]=l[BOUND1].COUNT4[BOUND1]=
    l[BOUND1].COUNT8[BOUND1]=0;//initialize the counters
    /**
      Child-thread creation.
      pthread_create spawns the child threads.
      As BOUND1 advances, as many child threads as N are created.
      run() is the function executed by each child thread.
    */
int iFbRet=pthread_create(&pt[BOUND1],NULL,&run,&l[BOUND1]);
if(iFbRet>0){
printf("[mainThread] pthread_create #%d: %d\n", l[BOUND1].BOUND1, iFbRet);
}
}
  /**
    Wait with join() until the child threads have run and all work has finished.
  */
for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){
pthread_join(pt[BOUND1],NULL);
}
  //Sum the per-thread counters
for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){
G.lTOTAL+=l[BOUND1].COUNT2[BOUND1]*2+
l[BOUND1].COUNT4[BOUND1]*4+l[BOUND1].COUNT8[BOUND1]*8;
G.lUNIQUE+=l[BOUND1].COUNT2[BOUND1]+
l[BOUND1].COUNT4[BOUND1]+l[BOUND1].COUNT8[BOUND1];
}
return 0;
}
// Parallel processing for CPU/CPUR (pthread)
void NQueen()
{
  /**
    Main-thread creation.
    The CUDA extension does not support pthread, so this cannot be executed here.
    Because it would not compile, the following line is commented out.
    For the parallel C version, see C13_N-Queen.c in the C directory.
    Uncomment the line below when you want to use pthread.
    //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL);
  */
  pthread_t pth; //thread variable
  int iFbRet=0;
  //uncomment here when you want to use pthread
  //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL);
//
if(iFbRet>0){
    printf("[main] pthread_create: %d\n", iFbRet); //error output for debugging
}
  pthread_join(pth,NULL); /* join each time */
}
//
int main(int argc,char** argv)
{
  /**
    Handling of the execution parameters
    $ nvcc -O3 CUDA13_N-Queen.cu && ./a.out (-c|-r|-g|-s)
            -c: CPU
            -r: CPU recursive
            -g: GPU
            -s: SGPU (presumably the Somers version)
  */
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
    else{ gpu=true; } //default to gpu
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
printf(" -r: CPUR only\n");
printf(" -c: CPU only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
  /**
    Output
    pthread does not work under the nvcc command.
    To run cpu/cpur, change the file extension to .c and then
    remove the comment at the start of the following line:
    #
    //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL);
    #
    1. Copy the source file and
      rename the copy's extension from .cu to .c
      CUDA13_N-Queen.cu -> CUDA13_N-Queen.c
    2. Find the following line in the source file and remove the leading comment.
      //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL);
    3. Run with the following command
      $ gcc -Wall -W -O3 -g -ftrapv -std=c99 -pthread CUDA13_N-Queen.c && ./a.out [-c|-r]
  */
if(cpu){
    printf("\n\n13.CPU non-recursive parallel processing\n");
    printf("pthread\nNote: pthread does not work under nvcc!\n");
  }else if(cpur){
    printf("\n\n13.CPUR recursive parallel processing\n");
    printf("pthread\nNote: pthread does not work under nvcc!\n");
  }else if(gpu){
    printf("\n\n13.GPU non-recursive parallel processing CUDA\n");
  }else if(sgpu){
    printf("\n\n13.SGPU non-recursive parallel processing CUDA\n");
}
  /**
    Execution of CPU (non-recursive on the CPU) and CPUR (recursive on the CPU)
  */
if(cpu||cpur){
    int min=4; int targetN=17;//start N and end N of the run
    /**
      Variables for measuring the processing time
    */
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
/**
        Variables such as size/sizeE/lTOTAL/lUNIQUE are stored in the struct below.
typedef struct {
int size;
int sizeE;
long lTOTAL,lUNIQUE;
}GCLASS, *GClass;
GCLASS G;
*/
      G.size=i;G.sizeE=i-1;//initialize size and sizeE
      G.lTOTAL=G.lUNIQUE=0;//initialize TOTAL and UNIQUE
//
      gettimeofday(&t0, NULL);//start timing
      /**
        For CPU/CPUR, select either non-recursive or recursive backtracking:
        recursive:0
        non-recursive:1
      */
      if(cpur){ //recursive
//NR=0;NQueenD();
NR=0;NQueen();
}
      if(cpu){ //non-recursive
//NR=1;NQueenD();
NR=1;NQueen();
}
//
      gettimeofday(&t1, NULL);//stop timing
/**
        Formatting the elapsed time
Total Unique dd:hh:mm:ss.ms
15: 2279184 285053 00:00:00:00.33
16: 14772512 1846955 00:00:00:01.59
17: 95815104 11977939 00:00:00:10.92
*/
int ss;int ms;int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
      /**
        Output
      */
printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n",
i,G.lTOTAL,G.lUNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
  /**
    Execution of GPU (SGPU processing based on former record holder Somers' GPU version)
    and GPUR (recursive processing on the GPU)
  */
if(gpu||sgpu){
    /**
      Check at run time whether the device supports CUDA
    */
if(!InitCUDA()){return 0;}
int steps=24576;
    int min=4;int targetN=21;//start N and end N of the run
    /**
      Variables for measuring the processing time
    */
struct timeval t0;
struct timeval t1;
    /**
      Output
    */
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
    /**
      Main execution loop
    */
for(int i=min;i<=targetN;i++){
      gettimeofday(&t0,NULL);   // start timing
      if(gpu){//main processing of this source
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}
      gettimeofday(&t1,NULL);   // stop timing
/**
        Formatting the elapsed time
Total Unique dd:hh:mm:ss.ms
15: 2279184 285053 00:00:00:00.33
16: 14772512 1846955 00:00:00:01.59
17: 95815104 11977939 00:00:00:10.92
*/
int ss;int ms;int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n",
i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}//end for
}//end if
return 0;
}
|
3ee8898e0941df45b04627d909d37ca38af69476.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"
//-----------------------------------------------------------------------------
__global__
void addKernel(double* a, double* b, double* c, const int n)
{
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
c[i] = a[i] + b[i];
}
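//-----------------------------------------------------------------------------
// Editorial sketch, not part of the original file: one possible host-side launch
// of addKernel. The helper name and the block size of 256 are assumptions; the
// grid-stride loop in the kernel lets any launch shape cover all n elements.
void launchAddKernel(double* d_a, double* d_b, double* d_c, const int n)
{
    const int block = 256;                     // assumed block size
    const int grid  = (n + block - 1) / block; // enough blocks to cover n once
    hipLaunchKernelGGL(addKernel, dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, n);
}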
| 3ee8898e0941df45b04627d909d37ca38af69476.cu | #include "kernels.cuh"
//-----------------------------------------------------------------------------
__global__
void addKernel(double* a, double* b, double* c, const int n)
{
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
c[i] = a[i] + b[i];
}
|
058caf133a07075509f4ea2ccd299d5b5e20f0a5.hip | // !!! This is a file automatically generated by hipify!!!
%%cu
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 1000
__global__ void per_row_kernel(int m,int n,int *A,int *B,int *C){
unsigned long long row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < m){
for(unsigned long long i = 0; i < n; ++i){
C[row*n + i] = A[row*n + i] + B[row*n + i];
}
}
}
__global__ void per_column_kernel(int m,int n,int *A,int *B,int *C){
unsigned long long col = (blockIdx.x * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
if (col < n){
for(unsigned long long i = 0; i < m; ++i){
C[i*n + col] = A[i*n + col] + B[i*n + col];
}
}
}
__global__ void per_element_kernel(int m,int n,int *A,int *B,int *C){
unsigned long long id = ((blockIdx.y*gridDim.x+blockIdx.x)*(blockDim.x*blockDim.y))+(threadIdx.y*blockDim.x+threadIdx.x);
if (id < m*n){
C[id] = A[id] + B[id];
}
}
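// Note: per_element_kernel flattens the 2D grid/block coordinates into a single
// element index; the (id < m*n) guard discards surplus threads, so the launch in
// main() only needs enough total threads to cover the m*n elements.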
int main(){
int A[N], B[N], C[N];
for(int i = 0; i < N; ++i)
{
A[i] = i+1;
B[i] = 2*i+2;
C[i] = 0;
}
int* gpuA, *gpuB, *gpuC;
hipMalloc(&gpuA, sizeof(int) * N);
hipMalloc(&gpuB, sizeof(int) * N);
hipMalloc(&gpuC, sizeof(int) * N);
hipMemcpy(gpuA, A, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(gpuB, B, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(gpuC, C, sizeof(int) * N, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( per_element_kernel), dim3(10), dim3(128), 0, 0, 20,50,gpuA,gpuB,gpuC);
hipDeviceSynchronize();
hipMemcpy(C, gpuC, sizeof(int) * N, hipMemcpyDeviceToHost);
for(int i = 0; i < N; ++i)
{
printf("%d ",C[i]);
if((i+1)%50==0)
printf("\n");
}
} | 058caf133a07075509f4ea2ccd299d5b5e20f0a5.cu | %%cu
#include <stdio.h>
#include <cuda.h>
#define N 1000
__global__ void per_row_kernel(int m,int n,int *A,int *B,int *C){
unsigned long long row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < m){
for(unsigned long long i = 0; i < n; ++i){
C[row*n + i] = A[row*n + i] + B[row*n + i];
}
}
}
__global__ void per_column_kernel(int m,int n,int *A,int *B,int *C){
unsigned long long col = (blockIdx.x * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
if (col < n){
for(unsigned long long i = 0; i < m; ++i){
C[i*n + col] = A[i*n + col] + B[i*n + col];
}
}
}
__global__ void per_element_kernel(int m,int n,int *A,int *B,int *C){
unsigned long long id = ((blockIdx.y*gridDim.x+blockIdx.x)*(blockDim.x*blockDim.y))+(threadIdx.y*blockDim.x+threadIdx.x);
if (id < m*n){
C[id] = A[id] + B[id];
}
}
int main(){
int A[N], B[N], C[N];
for(int i = 0; i < N; ++i)
{
A[i] = i+1;
B[i] = 2*i+2;
C[i] = 0;
}
int* gpuA, *gpuB, *gpuC;
cudaMalloc(&gpuA, sizeof(int) * N);
cudaMalloc(&gpuB, sizeof(int) * N);
cudaMalloc(&gpuC, sizeof(int) * N);
cudaMemcpy(gpuA, A, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(gpuB, B, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(gpuC, C, sizeof(int) * N, cudaMemcpyHostToDevice);
per_element_kernel<<<10, 128>>>(20,50,gpuA,gpuB,gpuC);
cudaThreadSynchronize();
cudaMemcpy(C, gpuC, sizeof(int) * N, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; ++i)
{
printf("%d ",C[i]);
if((i+1)%50==0)
printf("\n");
}
} |
c08214717b6e31d17f76a1ee17a11a7ffa5d3cb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_MEAN_ABSOLUTE_ERROR_LAYER_INSTANTIATE
#include "lbann/layers/loss/mean_absolute_error.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
template <int block_size, typename TensorDataType>
__global__ void fp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
TensorDataType* __restrict__ contribution) {
// Indices
const int tid = threadIdx.x;
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute local contribution for each matrix column
for (int col = bidy; col < local_width; col += gridDim.y) {
// Compute contributions for each thread
TensorDataType private_contribution = TensorDataType(0.0);
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
private_contribution += gpu_lib::abs(x - xhat);
}
// Shared memory reduction to get contribution for each block
/// @todo unroll loops
__shared__ TensorDataType shared_contribution[block_size];
shared_contribution[tid] = private_contribution;
for (int stride = block_size / 2; stride > 0; stride /= 2) {
__syncthreads();
if (tid < stride) {
shared_contribution[tid] += shared_contribution[tid + stride];
}
}
if (tid == 0) {
shared_contribution[0] /= global_height;
gpu_lib::atomic_add(&contribution[col], shared_contribution[0]);
}
}
}
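// Note: fp_kernel reduces in three stages: each thread accumulates its share of
// rows, each block folds its threads through the power-of-two shared-memory tree
// above, and the blocks of a column combine their partial sums via gpu_lib::atomic_add.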
template <typename TensorDataType>
void local_fp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
El::AbstractMatrix<TensorDataType>& local_contribution) {
El::Zero(local_contribution);
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_contribution),
gpu::get_sync_info(local_ground_truth),
gpu::get_sync_info(local_prediction));
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
fp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_contribution.Buffer());
}
}
template <int block_size, typename TensorDataType>
__global__ void bp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
TensorDataType* __restrict__ gradient_wrt_prediction,
int gradient_wrt_prediction_ldim,
TensorDataType* __restrict__ gradient_wrt_ground_truth,
int gradient_wrt_ground_truth_ldim) {
// Indices
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute gradients
for (int col = bidy; col < local_width; col += gridDim.y) {
const auto& dy = gradient_wrt_output[col];
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
auto& dx = gradient_wrt_prediction[row + col * gradient_wrt_prediction_ldim];
auto& dxhat = gradient_wrt_ground_truth[row + col * gradient_wrt_ground_truth_ldim];
const TensorDataType global_height_dt = TensorDataType(global_height);
if (x > xhat) {
dx = dy / global_height_dt;
dxhat = -dy / global_height_dt;
} else if (x < xhat) {
dx = -dy / global_height_dt;
dxhat = dy / global_height_dt;
} else {
dx = TensorDataType(0.0);
dxhat = TensorDataType(0.0);
}
}
}
}
template <typename TensorDataType>
void local_bp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
const El::AbstractMatrix<TensorDataType>& local_gradient_wrt_output,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_prediction,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_ground_truth) {
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
auto multisync = El::MakeMultiSync(
gpu::get_sync_info(local_gradient_wrt_prediction),
gpu::get_sync_info(local_gradient_wrt_ground_truth),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_ground_truth),
gpu::get_sync_info(local_prediction));
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_prediction.Buffer(),
local_gradient_wrt_prediction.LDim(),
local_gradient_wrt_ground_truth.Buffer(),
local_gradient_wrt_ground_truth.LDim());
}
}
} // namespace
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_fp_compute() {
local_fp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->Matrix());
}
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_bp_compute() {
local_bp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->LockedMatrix(),
this->get_local_error_signals(0),
this->get_local_error_signals(1));
}
#define PROTO(T) \
template class mean_absolute_error_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class mean_absolute_error_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| c08214717b6e31d17f76a1ee17a11a7ffa5d3cb5.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_MEAN_ABSOLUTE_ERROR_LAYER_INSTANTIATE
#include "lbann/layers/loss/mean_absolute_error.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
template <int block_size, typename TensorDataType>
__global__ void fp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
TensorDataType* __restrict__ contribution) {
// Indices
const int tid = threadIdx.x;
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute local contribution for each matrix column
for (int col = bidy; col < local_width; col += gridDim.y) {
// Compute contributions for each thread
TensorDataType private_contribution = TensorDataType(0.0);
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
private_contribution += gpu_lib::abs(x - xhat);
}
// Shared memory reduction to get contribution for each block
/// @todo unroll loops
__shared__ TensorDataType shared_contribution[block_size];
shared_contribution[tid] = private_contribution;
for (int stride = block_size / 2; stride > 0; stride /= 2) {
__syncthreads();
if (tid < stride) {
shared_contribution[tid] += shared_contribution[tid + stride];
}
}
if (tid == 0) {
shared_contribution[0] /= global_height;
gpu_lib::atomic_add(&contribution[col], shared_contribution[0]);
}
}
}
template <typename TensorDataType>
void local_fp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
El::AbstractMatrix<TensorDataType>& local_contribution) {
El::Zero(local_contribution);
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_contribution),
gpu::get_sync_info(local_ground_truth),
gpu::get_sync_info(local_prediction));
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
fp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_contribution.Buffer());
}
}
template <int block_size, typename TensorDataType>
__global__ void bp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
TensorDataType* __restrict__ gradient_wrt_prediction,
int gradient_wrt_prediction_ldim,
TensorDataType* __restrict__ gradient_wrt_ground_truth,
int gradient_wrt_ground_truth_ldim) {
// Indices
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute gradients
for (int col = bidy; col < local_width; col += gridDim.y) {
const auto& dy = gradient_wrt_output[col];
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
auto& dx = gradient_wrt_prediction[row + col * gradient_wrt_prediction_ldim];
auto& dxhat = gradient_wrt_ground_truth[row + col * gradient_wrt_ground_truth_ldim];
const TensorDataType global_height_dt = TensorDataType(global_height);
if (x > xhat) {
dx = dy / global_height_dt;
dxhat = -dy / global_height_dt;
} else if (x < xhat) {
dx = -dy / global_height_dt;
dxhat = dy / global_height_dt;
} else {
dx = TensorDataType(0.0);
dxhat = TensorDataType(0.0);
}
}
}
}
template <typename TensorDataType>
void local_bp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
const El::AbstractMatrix<TensorDataType>& local_gradient_wrt_output,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_prediction,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_ground_truth) {
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
auto multisync = El::MakeMultiSync(
gpu::get_sync_info(local_gradient_wrt_prediction),
gpu::get_sync_info(local_gradient_wrt_ground_truth),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_ground_truth),
gpu::get_sync_info(local_prediction));
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_prediction.Buffer(),
local_gradient_wrt_prediction.LDim(),
local_gradient_wrt_ground_truth.Buffer(),
local_gradient_wrt_ground_truth.LDim());
}
}
} // namespace
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_fp_compute() {
local_fp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->Matrix());
}
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_bp_compute() {
local_bp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->LockedMatrix(),
this->get_local_error_signals(0),
this->get_local_error_signals(1));
}
#define PROTO(T) \
template class mean_absolute_error_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class mean_absolute_error_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
5051859736f37e94ff8149f44f7a501ef8685ce9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#define H(a) (-a * logf(a))
#define H2(a1, a2, sum) (H((a1) / (sum)) + H((a2) / (sum)))
#define H3(a1, a2, a3, sum) (H((a1) / (sum)) + H((a2) / (sum)) + H((a3) / (sum)))
#define H6(a1, a2, a3, a4, a5, a6, sum) (H((a1) / (sum)) + H((a2) / (sum)) + \
H((a3) / (sum)) + H((a4) / (sum)) + \
H((a5) / (sum)) + H((a6) / (sum)))
#define H9(a1, a2, a3, a4, a5, a6, a7, a8, a9, sum) \
(H3(a1, a2, a3, sum) + H3(a4, a5, a6, sum) + H3(a7, a8, a9, sum))
#define H18(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, sum) \
(H6(a1, a2, a3, a4, a5, a6, sum) + \
H6(a7, a8, a9, a10, a11, a12, sum) + \
H6(a13, a14, a15, a16, a17, a18, sum))
/* Macros for summing a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
 * - matrix of descriptive-variable values *vars, 1 row = 1 variable
 * - vector of decision-variable values *ds
 * - number of objects: num_objects
 * - resulting GIG values
 */
__global__ void compute_gig(int *vars, int *ds, int num_objects, float *r_gig)
{
	int count[2][3][3] = {0}; /* zero the contingency table before tallying */
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p <= v2_p) return;
for (int i = 0; i < num_objects; ++i) {
int d = ds[i]; //(ds[i / 8] << (i % 8)) & 1;
int v1 = vars[v1_p * num_objects + i]; //(vars[v1_p * num_objects + i / 4] << (i % 4)) & 3;
int v2 = vars[v2_p * num_objects + i]; //(vars[v2_p * num_objects + i / 4] << (i % 4)) & 3;
count[d][v1][v2]++;
}
float ig1, ig2, ig12;
ig1 = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), num_objects) +
H3(SUM_N1_N3(count, 0), SUM_N1_N3(count, 1), SUM_N1_N3(count, 2), num_objects) -
H6(SUM_N3(count, 0, 0), SUM_N3(count, 0, 1), SUM_N3(count, 0, 2),
SUM_N3(count, 1, 0), SUM_N3(count, 1, 1), SUM_N3(count, 1, 2), num_objects);
ig2 = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), num_objects) +
H3(SUM_N1_N2(count, 0), SUM_N1_N2(count, 1), SUM_N1_N2(count, 2), num_objects) -
H6(SUM_N2(count, 0, 0), SUM_N2(count, 0, 1), SUM_N2(count, 0, 2),
SUM_N2(count, 1, 0), SUM_N2(count, 1, 1), SUM_N2(count, 1, 2), num_objects);
ig12 = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), num_objects) +
H9(SUM_N1(count, 0, 0), SUM_N1(count, 0, 1), SUM_N1(count, 0, 2),
SUM_N1(count, 1, 0), SUM_N1(count, 1, 1), SUM_N1(count, 1, 2),
SUM_N1(count, 2, 0), SUM_N1(count, 2, 1), SUM_N1(count, 2, 2), num_objects) -
H18(count[0][0][0], count[0][0][1], count[0][0][2], count[0][1][0], count[0][1][1], count[0][1][2],
count[0][2][0], count[0][2][1], count[0][2][2], count[1][0][0], count[1][0][1], count[1][0][2],
count[1][1][0], count[1][1][1], count[1][1][2], count[1][2][0], count[1][2][1], count[1][2][2],
num_objects);
r_gig[v1_p * num_objects + v2_p] = ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
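/* Note: the last statement stores ig12 - max(ig1, ig2) for the pair (v1_p, v2_p);
 * the zero-initialized count[][][] table above is what makes these entropy sums
 * well defined. */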
int main()
{
int num_objects, num_vars, *ds, *vars;
scanf("%d %d", &num_objects, &num_vars);
malloc(...);
	for (int i = 0; i < num_objects; ++i) {
		scanf("%d", &ds[i]);
		for (int j = 0; j < num_vars; ++j)
			scanf("%d", &vars[i * num_vars + j]);
}
compute_gig<<<>>>();
return 0;
}
| 5051859736f37e94ff8149f44f7a501ef8685ce9.cu | #include "util.h"
#define H(a) (-a * logf(a))
#define H2(a1, a2, sum) (H((a1) / (sum)) + H((a2) / (sum)))
#define H3(a1, a2, a3, sum) (H((a1) / (sum)) + H((a2) / (sum)) + H((a3) / (sum)))
#define H6(a1, a2, a3, a4, a5, a6, sum) (H((a1) / (sum)) + H((a2) / (sum)) + \
H((a3) / (sum)) + H((a4) / (sum)) + \
H((a5) / (sum)) + H((a6) / (sum)))
#define H9(a1, a2, a3, a4, a5, a6, a7, a8, a9, sum) \
(H3(a1, a2, a3, sum) + H3(a4, a5, a6, sum) + H3(a7, a8, a9, sum))
#define H18(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, sum) \
(H6(a1, a2, a3, a4, a5, a6, sum) + \
H6(a7, a8, a9, a10, a11, a12, sum) + \
H6(a13, a14, a15, a16, a17, a18, sum))
/* Macros for summing a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
 * - matrix of descriptive-variable values *vars, 1 row = 1 variable
 * - vector of decision-variable values *ds
 * - number of objects: num_objects
 * - resulting GIG values
 */
__global__ void compute_gig(int *vars, int *ds, int num_objects, float *r_gig)
{
	int count[2][3][3] = {0}; /* zero the contingency table before tallying */
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p <= v2_p) return;
for (int i = 0; i < num_objects; ++i) {
int d = ds[i]; //(ds[i / 8] << (i % 8)) & 1;
int v1 = vars[v1_p * num_objects + i]; //(vars[v1_p * num_objects + i / 4] << (i % 4)) & 3;
int v2 = vars[v2_p * num_objects + i]; //(vars[v2_p * num_objects + i / 4] << (i % 4)) & 3;
count[d][v1][v2]++;
}
float ig1, ig2, ig12;
ig1 = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), num_objects) +
H3(SUM_N1_N3(count, 0), SUM_N1_N3(count, 1), SUM_N1_N3(count, 2), num_objects) -
H6(SUM_N3(count, 0, 0), SUM_N3(count, 0, 1), SUM_N3(count, 0, 2),
SUM_N3(count, 1, 0), SUM_N3(count, 1, 1), SUM_N3(count, 1, 2), num_objects);
ig2 = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), num_objects) +
H3(SUM_N1_N2(count, 0), SUM_N1_N2(count, 1), SUM_N1_N2(count, 2), num_objects) -
H6(SUM_N2(count, 0, 0), SUM_N2(count, 0, 1), SUM_N2(count, 0, 2),
SUM_N2(count, 1, 0), SUM_N2(count, 1, 1), SUM_N2(count, 1, 2), num_objects);
ig12 = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), num_objects) +
H9(SUM_N1(count, 0, 0), SUM_N1(count, 0, 1), SUM_N1(count, 0, 2),
SUM_N1(count, 1, 0), SUM_N1(count, 1, 1), SUM_N1(count, 1, 2),
SUM_N1(count, 2, 0), SUM_N1(count, 2, 1), SUM_N1(count, 2, 2), num_objects) -
H18(count[0][0][0], count[0][0][1], count[0][0][2], count[0][1][0], count[0][1][1], count[0][1][2],
count[0][2][0], count[0][2][1], count[0][2][2], count[1][0][0], count[1][0][1], count[1][0][2],
count[1][1][0], count[1][1][1], count[1][1][2], count[1][2][0], count[1][2][1], count[1][2][2],
num_objects);
r_gig[v1_p * num_objects + v2_p] = ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
int main()
{
int num_objects, num_vars, *ds, *vars;
scanf("%d %d", &num_objects, &num_vars);
malloc(...);
	for (int i = 0; i < num_objects; ++i) {
		scanf("%d", &ds[i]);
		for (int j = 0; j < num_vars; ++j)
			scanf("%d", &vars[i * num_vars + j]);
}
compute_gig<<<>>>();
return 0;
}
|
67a9545b962e128b83d0ccaed01b789955abd5b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "../inc/ising.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 4
//functions
__global__ void calculateNewSpinKernel(int * M, int * newM, double * w, int n, int * flag);
void ising( int *G, double *w, int k , int n)
{
struct timeval startwtime, endwtime;
double time = 0;
//flag to terminate if no changes are made
int terminate_flag;
int * d_terminate_flag;
//for pointer swap
int * temp;
//cuda
int * d_G, *d_newG;
double * d_w;
//cuda mallocs
hipMalloc(&d_terminate_flag,sizeof(int));
hipMalloc(&d_G, n*n*sizeof(int));
hipMalloc(&d_newG, n*n*sizeof(int));
hipMalloc(&d_w, 5*5*sizeof(double));
//cuda memcpy G and W
hipMemcpy(d_G, G, n*n*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_w, w, 5*5*sizeof(double), hipMemcpyHostToDevice);
//declare block size and grid size
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid(GRID_SIZE, GRID_SIZE);
//k steps iterations
for(int i= 0 ; i < k ;i++)
{
//reset flag value
terminate_flag = 1;
hipMemcpy(d_terminate_flag, &terminate_flag,sizeof(int), hipMemcpyHostToDevice);
//call kernel
gettimeofday (&startwtime, NULL);
hipLaunchKernelGGL(( calculateNewSpinKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_G,d_newG,d_w,n,d_terminate_flag);
hipDeviceSynchronize();
gettimeofday (&endwtime, NULL);
time += (double)((endwtime.tv_usec - startwtime.tv_usec)
/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
//swap pointers
temp = d_G;
d_G = d_newG;
d_newG = temp;
//we need device value for flag here
hipMemcpy(&terminate_flag, d_terminate_flag, sizeof(int), hipMemcpyDeviceToHost);
if (terminate_flag == 1)
{
break;
}
printf("Kernel time: %f seconds\n", time );
hipMemcpy(G,d_G, n*n*sizeof(int), hipMemcpyDeviceToHost);
//hipFree
}
hipFree(d_newG);
hipFree(d_G);
hipFree(d_w);
}
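// Note: ising() double-buffers the lattice by swapping the d_G/d_newG pointers after
// each kernel launch; d_terminate_flag is reset to 1 before every step and cleared by
// the kernel whenever any spin still changes, so an unchanged step ends the loop early.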
//kernel function
__global__ void calculateNewSpinKernel(int * M, int * newM, double * w, int n, int * flag)
{
    //indices
int row = blockIdx.x*blockDim.x+threadIdx.x;
int col = blockIdx.y*blockDim.y+threadIdx.y;
int thread_id = col* n + row;
//guard for extra threads
if(thread_id < n*n)
{
//add for loop back and implement grid stride
for( int stride_thread_id = thread_id; stride_thread_id<n*n; stride_thread_id += (blockDim.x * gridDim.x))
{
double influence = 0;
//coordinates
int y = stride_thread_id / n;
int x = stride_thread_id % n;
for (int k=-2; k<=2;k++)
{
for(int l= -2; l<=2; l++)
{
influence += w[(2+k)*5+(2+l)] * M[((k + y + n) % n) * n + (l + x + n) % n];
}
}
//influence float point error
if(fabs(influence) < 10e-7)
{
newM[stride_thread_id] = M[stride_thread_id];
}
else if(influence>0)
{
if(M[stride_thread_id]!=1)
*flag = 0;
newM[stride_thread_id] = 1;
}
else if(influence<0)
{
if(M[stride_thread_id]!=-1)
*flag = 0;
newM[stride_thread_id] = -1;
}
}
}
}
| 67a9545b962e128b83d0ccaed01b789955abd5b2.cu | #include "../inc/ising.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 4
//functions
__global__ void calculateNewSpinKernel(int * M, int * newM, double * w, int n, int * flag);
void ising( int *G, double *w, int k , int n)
{
struct timeval startwtime, endwtime;
double time = 0;
//flag to terminate if no changes are made
int terminate_flag;
int * d_terminate_flag;
//for pointer swap
int * temp;
//cuda
int * d_G, *d_newG;
double * d_w;
//cuda mallocs
cudaMalloc(&d_terminate_flag,sizeof(int));
cudaMalloc(&d_G, n*n*sizeof(int));
cudaMalloc(&d_newG, n*n*sizeof(int));
cudaMalloc(&d_w, 5*5*sizeof(double));
//cuda memcpy G and W
cudaMemcpy(d_G, G, n*n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_w, w, 5*5*sizeof(double), cudaMemcpyHostToDevice);
//declare block size and grid size
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid(GRID_SIZE, GRID_SIZE);
//k steps iterations
for(int i= 0 ; i < k ;i++)
{
//reset flag value
terminate_flag = 1;
cudaMemcpy(d_terminate_flag, &terminate_flag,sizeof(int), cudaMemcpyHostToDevice);
//call kernel
gettimeofday (&startwtime, NULL);
calculateNewSpinKernel<<<blocksPerGrid, threadsPerBlock>>>(d_G,d_newG,d_w,n,d_terminate_flag);
cudaDeviceSynchronize();
gettimeofday (&endwtime, NULL);
time += (double)((endwtime.tv_usec - startwtime.tv_usec)
/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
//swap pointers
temp = d_G;
d_G = d_newG;
d_newG = temp;
//we need device value for flag here
cudaMemcpy(&terminate_flag, d_terminate_flag, sizeof(int), cudaMemcpyDeviceToHost);
if (terminate_flag == 1)
{
break;
}
printf("Kernel time: %f seconds\n", time );
cudaMemcpy(G,d_G, n*n*sizeof(int), cudaMemcpyDeviceToHost);
//cudaFree
}
cudaFree(d_newG);
cudaFree(d_G);
cudaFree(d_w);
}
//kernel function
__global__ void calculateNewSpinKernel(int * M, int * newM, double * w, int n, int * flag)
{
    //indices
int row = blockIdx.x*blockDim.x+threadIdx.x;
int col = blockIdx.y*blockDim.y+threadIdx.y;
int thread_id = col* n + row;
//guard for extra threads
if(thread_id < n*n)
{
//add for loop back and implement grid stride
for( int stride_thread_id = thread_id; stride_thread_id<n*n; stride_thread_id += (blockDim.x * gridDim.x))
{
double influence = 0;
//coordinates
int y = stride_thread_id / n;
int x = stride_thread_id % n;
for (int k=-2; k<=2;k++)
{
for(int l= -2; l<=2; l++)
{
influence += w[(2+k)*5+(2+l)] * M[((k + y + n) % n) * n + (l + x + n) % n];
}
}
//influence float point error
if(fabs(influence) < 10e-7)
{
newM[stride_thread_id] = M[stride_thread_id];
}
else if(influence>0)
{
if(M[stride_thread_id]!=1)
*flag = 0;
newM[stride_thread_id] = 1;
}
else if(influence<0)
{
if(M[stride_thread_id]!=-1)
*flag = 0;
newM[stride_thread_id] = -1;
}
}
}
}
|
cc59bb5600d5977c7df7ab490048deb9364d7459.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// DESCRIPTION HERE
#include <stdio.h>
#include <stdint.h>
#include <iostream>
#include <chrono>
#define NUM_THREADS 448
#define NUM_CHARS 26
#define SHARED_MEM_SIZE NUM_THREADS * NUM_CHARS
// Calculate how much padding is needed to make the file evenly divided into 448 chunks
size_t calculatePadding(size_t fileSize)
{
size_t paddingBytes = NUM_THREADS - (fileSize % NUM_THREADS);
return paddingBytes;
}
// Pad the data so it is evenly divided into 448 chunks
void padData(uint8_t * buf, size_t bytesRead, size_t numPadBytes)
{
for (size_t i = 0; i < numPadBytes; i++)
{
buf[bytesRead + i] = 'a';
}
}
// Read a file into a byte array
uint8_t * readFile(const char * filename, size_t * outBytesRead, size_t * paddingBytes)
{
FILE *handle = fopen(filename, "rb");
fseek(handle, 0, SEEK_END);
*outBytesRead = ftell(handle);
*paddingBytes = calculatePadding(*outBytesRead);
rewind(handle);
uint8_t * buf = (uint8_t *) malloc((*outBytesRead + *paddingBytes)*sizeof(uint8_t));
fread(buf, *outBytesRead, 1, handle);
fclose(handle);
padData(buf, *outBytesRead, *paddingBytes);
return buf;
}
// Shift all ascii letters so that 'a' is index 0, 'b' is index 1, etc.
__device__ __constant__ int shiftAmount;
__global__ void shiftLetters(uint8_t *data)
{
uint32_t threadId = blockIdx.x * blockDim.x + threadIdx.x;
data[threadId] = data[threadId] - shiftAmount;
}
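// Note: shiftLetters assumes the input holds only lowercase ASCII letters, since it
// subtracts shiftAmount ('a') from every byte unconditionally; padData() also pads
// with 'a', which is why unpadResult() later subtracts the pad count from letterCounts[0].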
// Zero out the letter counts
__device__ void zeroLetterCounts(uint32_t * letterCounts)
{
for (size_t i = 0; i < NUM_CHARS; i++) {
letterCounts[(threadIdx.x * NUM_CHARS) + i] = 0;
}
}
// Count the occurrence of each letter in *data
__device__ void countLetters(uint8_t *data, uint32_t *letterCounts, uint32_t *threadLetterCounts, size_t chunkSize)
{
zeroLetterCounts(threadLetterCounts);
__syncthreads();
// Tally letters for each thread
for (size_t i = 0; i < chunkSize; i++)
{
threadLetterCounts[(threadIdx.x * NUM_CHARS) + data[(threadIdx.x * chunkSize) + i]]++;
}
__syncthreads();
// Total local thread tallys
if (threadIdx.x < NUM_CHARS)
{
for (size_t i = 0; i < NUM_THREADS; i++)
{
letterCounts[threadIdx.x] += threadLetterCounts[threadIdx.x + (i * NUM_CHARS)];
}
}
}
// Count the occurrence of each letter in *data using shared memory
__global__ void countLettersShared(uint8_t *data, uint32_t *letterCounts, size_t chunkSize)
{
__shared__ uint32_t sharedLetterCounts[SHARED_MEM_SIZE];
countLetters(data, letterCounts, sharedLetterCounts, chunkSize);
}
// Count the occurrence of each letter in *data using global memory
__global__ void countLettersGlobal(uint8_t *data, uint32_t *letterCounts, uint32_t * threadLetterCounts, size_t chunkSize)
{
countLetters(data, letterCounts, threadLetterCounts, chunkSize);
}
// Remove any padding so that letter counts are accurate
void unpadResult(uint32_t * letterCounts, size_t paddingBytes)
{
letterCounts[0] -= paddingBytes;
}
// Count the occurrence of each letter in *data using shared memory
uint64_t countWithGPUShared(uint8_t * data, size_t dataSize, uint32_t * letterCounts, size_t textChunkSize)
{
// Declare cuda memory
uint8_t *gpuData;
uint32_t *gpuLetterCounts;
hipMalloc((void **)&gpuData, dataSize);
hipMemcpy(gpuData, data, dataSize, hipMemcpyHostToDevice);
hipMalloc((void **)&gpuLetterCounts, NUM_CHARS * sizeof(uint32_t));
hipMemcpy(gpuLetterCounts, letterCounts, NUM_CHARS * sizeof(uint32_t), hipMemcpyHostToDevice);
// Run Kernel
auto start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( shiftLetters), dim3(textChunkSize), dim3(NUM_THREADS), 0, 0, gpuData);
hipLaunchKernelGGL(( countLettersShared), dim3(1), dim3(NUM_THREADS), 0, 0, gpuData, gpuLetterCounts, textChunkSize);
auto stop = std::chrono::high_resolution_clock::now();
hipMemcpy(letterCounts, gpuLetterCounts, NUM_CHARS * sizeof(uint32_t), hipMemcpyDeviceToHost);
// Free the arrays on the GPU as now we're done with them
hipFree(gpuData);
hipFree(gpuLetterCounts);
return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
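// Editorial note: kernel launches are asynchronous, so the chrono stop above mostly
// measures launch overhead rather than kernel completion; adding hipDeviceSynchronize()
// before taking `stop` would make the reported duration include the actual kernel run.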
// Count the occurrence of each letter in *data using global memory
uint64_t countWithGPUGlobal(uint8_t * data, size_t dataSize, uint32_t * letterCounts, size_t textChunkSize)
{
// Declare cuda memory
uint8_t *gpuData;
uint32_t *gpuLetterCounts;
uint32_t *threadLetterCounts;
hipMalloc((void **)&gpuData, dataSize);
hipMemcpy(gpuData, data, dataSize, hipMemcpyHostToDevice);
hipMalloc((void **)&gpuLetterCounts, NUM_CHARS * sizeof(uint32_t));
hipMemcpy(gpuLetterCounts, letterCounts, NUM_CHARS * sizeof(uint32_t), hipMemcpyHostToDevice);
    hipMalloc((void **)&threadLetterCounts, SHARED_MEM_SIZE * sizeof(uint32_t));
// Run Kernel
auto start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( shiftLetters), dim3(textChunkSize), dim3(NUM_THREADS), 0, 0, gpuData);
hipLaunchKernelGGL(( countLettersGlobal), dim3(1), dim3(NUM_THREADS), 0, 0, gpuData, gpuLetterCounts, threadLetterCounts, textChunkSize);
auto stop = std::chrono::high_resolution_clock::now();
hipMemcpy(letterCounts, gpuLetterCounts, NUM_CHARS * sizeof(uint32_t), hipMemcpyDeviceToHost);
/* Free the arrays on the GPU as now we're done with them */
hipFree(gpuData);
hipFree(gpuLetterCounts);
return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
// Use the CPU to count the occurrences of each letter in *data
uint64_t countWithCPU(uint8_t * data, size_t dataSize, uint32_t * letterCounts, int ascii_a)
{
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < dataSize; i++)
{
letterCounts[data[i] - ascii_a]++;
}
auto stop = std::chrono::high_resolution_clock::now();
return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
// Display letter counts
void displayResults(uint32_t * letterCounts)
{
printf("\n\n");
for (size_t i = 0; i < NUM_CHARS; i++)
{
printf("Found %d %c's\n", letterCounts[i], i + 0x61);
}
printf("\n\n");
}
// Display and analyze the run times (shared vs. global vs. CPU)
void displayTimingResults(uint64_t gpuSharedDuration, uint64_t gpuGlobalDuration, uint64_t cpuDuration)
{
printf("Took %dns to run processing on GPU with shared memory\n", gpuSharedDuration);
printf("Took %dns to run processing on GPU with global memory\n", gpuGlobalDuration);
printf("Took %dns to run on CPU\n", cpuDuration);
printf("\n");
printf("Shared Memory runs %fx faster than global memory\n", ((double)gpuGlobalDuration) / gpuSharedDuration);
printf("Shared Memory on GPU runs %fx faster than the CPU\n", ((double)cpuDuration) / gpuSharedDuration);
printf("\n");
}
int main(int argc, char* argv[])
{
// Read command line args
std::string fileName = "all_letter.shakespeare.txt";
if (argc > 1) {
fileName = argv[1];
}
// Copy from host to constant memory
const int ascii_a = 0x61;
    hipMemcpyToSymbol(shiftAmount, &ascii_a, sizeof(int));
// Declare some variables
uint32_t letterCounts[NUM_CHARS];
size_t bytesRead;
size_t paddingBytes;
// Read file
uint8_t * data = readFile(fileName.c_str(), &bytesRead, &paddingBytes);
// Calculate run-time parameters
size_t dataSize = bytesRead + paddingBytes;
size_t textChunkSize = dataSize / NUM_THREADS;
printf("Bytes read: %d\n", bytesRead);
printf("Padding bytes: %d\n", paddingBytes);
uint8_t *pinnedData;
hipHostMalloc((void**)&pinnedData, dataSize);
memcpy(pinnedData, data, dataSize);
// Run letter counter on the CPU
memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
uint64_t cpuDuration = countWithCPU(pinnedData, dataSize, letterCounts, ascii_a);
// Run letter counter on the GPU with global memory
memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
uint64_t gpuGlobalDuration = countWithGPUGlobal(pinnedData, dataSize, letterCounts, textChunkSize);
// Run letter counter on the GPU with shared memory
memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
uint64_t gpuSharedDuration = countWithGPUShared(pinnedData, dataSize, letterCounts, textChunkSize);
unpadResult(letterCounts, paddingBytes);
// Display letter counts and timing
displayResults(letterCounts);
displayTimingResults(gpuSharedDuration, gpuGlobalDuration, cpuDuration);
return EXIT_SUCCESS;
}
| cc59bb5600d5977c7df7ab490048deb9364d7459.cu | // DESCRIPTION HERE
#include <stdio.h>
#include <stdint.h>
#include <iostream>
#include <chrono>
#define NUM_THREADS 448
#define NUM_CHARS 26
#define SHARED_MEM_SIZE NUM_THREADS * NUM_CHARS
// Calculate how much padding is needed to make the file evenly divided into 448 chunks
size_t calculatePadding(size_t fileSize)
{
size_t paddingBytes = NUM_THREADS - (fileSize % NUM_THREADS);
return paddingBytes;
}
// Pad the data so it is evenly divided into 448 chunks
void padData(uint8_t * buf, size_t bytesRead, size_t numPadBytes)
{
for (size_t i = 0; i < numPadBytes; i++)
{
buf[bytesRead + i] = 'a';
}
}
// Read a file into a byte array
uint8_t * readFile(const char * filename, size_t * outBytesRead, size_t * paddingBytes)
{
FILE *handle = fopen(filename, "rb");
fseek(handle, 0, SEEK_END);
*outBytesRead = ftell(handle);
*paddingBytes = calculatePadding(*outBytesRead);
rewind(handle);
uint8_t * buf = (uint8_t *) malloc((*outBytesRead + *paddingBytes)*sizeof(uint8_t));
fread(buf, *outBytesRead, 1, handle);
fclose(handle);
padData(buf, *outBytesRead, *paddingBytes);
return buf;
}
// Shift all ascii letters so that 'a' is index 0, 'b' is index 1, etc.
__device__ __constant__ int shiftAmount;
__global__ void shiftLetters(uint8_t *data)
{
uint32_t threadId = blockIdx.x * blockDim.x + threadIdx.x;
data[threadId] = data[threadId] - shiftAmount;
}
// Zero out the letter counts
__device__ void zeroLetterCounts(uint32_t * letterCounts)
{
for (size_t i = 0; i < NUM_CHARS; i++) {
letterCounts[(threadIdx.x * NUM_CHARS) + i] = 0;
}
}
// Count the occurrence of each letter in *data
__device__ void countLetters(uint8_t *data, uint32_t *letterCounts, uint32_t *threadLetterCounts, size_t chunkSize)
{
zeroLetterCounts(threadLetterCounts);
__syncthreads();
// Tally letters for each thread
for (size_t i = 0; i < chunkSize; i++)
{
threadLetterCounts[(threadIdx.x * NUM_CHARS) + data[(threadIdx.x * chunkSize) + i]]++;
}
__syncthreads();
// Total local thread tallys
if (threadIdx.x < NUM_CHARS)
{
for (size_t i = 0; i < NUM_THREADS; i++)
{
letterCounts[threadIdx.x] += threadLetterCounts[threadIdx.x + (i * NUM_CHARS)];
}
}
}
// Count the occurrence of each letter in *data using shared memory
__global__ void countLettersShared(uint8_t *data, uint32_t *letterCounts, size_t chunkSize)
{
__shared__ uint32_t sharedLetterCounts[SHARED_MEM_SIZE];
countLetters(data, letterCounts, sharedLetterCounts, chunkSize);
}
// Count the occurrence of each letter in *data using global memory
__global__ void countLettersGlobal(uint8_t *data, uint32_t *letterCounts, uint32_t * threadLetterCounts, size_t chunkSize)
{
countLetters(data, letterCounts, threadLetterCounts, chunkSize);
}
// Remove any padding so that letter counts are accurate
void unpadResult(uint32_t * letterCounts, size_t paddingBytes)
{
letterCounts[0] -= paddingBytes;
}
// Count the occurrence of each letter in *data using shared memory
uint64_t countWithGPUShared(uint8_t * data, size_t dataSize, uint32_t * letterCounts, size_t textChunkSize)
{
// Declare cuda memory
uint8_t *gpuData;
uint32_t *gpuLetterCounts;
cudaMalloc((void **)&gpuData, dataSize);
cudaMemcpy(gpuData, data, dataSize, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpuLetterCounts, NUM_CHARS * sizeof(uint32_t));
cudaMemcpy(gpuLetterCounts, letterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyHostToDevice);
// Run Kernel
auto start = std::chrono::high_resolution_clock::now();
shiftLetters<<<textChunkSize, NUM_THREADS>>>(gpuData);
countLettersShared<<<1, NUM_THREADS>>>(gpuData, gpuLetterCounts, textChunkSize);
auto stop = std::chrono::high_resolution_clock::now();
cudaMemcpy(letterCounts, gpuLetterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyDeviceToHost);
// Free the arrays on the GPU as now we're done with them
cudaFree(gpuData);
cudaFree(gpuLetterCounts);
return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
// Count the occurrence of each letter in *data using global memory
uint64_t countWithGPUGlobal(uint8_t * data, size_t dataSize, uint32_t * letterCounts, size_t textChunkSize)
{
// Declare cuda memory
uint8_t *gpuData;
uint32_t *gpuLetterCounts;
uint32_t *threadLetterCounts;
cudaMalloc((void **)&gpuData, dataSize);
cudaMemcpy(gpuData, data, dataSize, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpuLetterCounts, NUM_CHARS * sizeof(uint32_t));
cudaMemcpy(gpuLetterCounts, letterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMalloc((void **)&threadLetterCounts, SHARED_MEM_SIZE);
// Run Kernel
auto start = std::chrono::high_resolution_clock::now();
shiftLetters<<<textChunkSize, NUM_THREADS>>>(gpuData);
countLettersGlobal<<<1, NUM_THREADS>>>(gpuData, gpuLetterCounts, threadLetterCounts, textChunkSize);
auto stop = std::chrono::high_resolution_clock::now();
cudaMemcpy(letterCounts, gpuLetterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyDeviceToHost);
/* Free the arrays on the GPU as now we're done with them */
cudaFree(gpuData);
cudaFree(gpuLetterCounts);
return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
// Use the CPU to count the occurrences of each letter in *data
uint64_t countWithCPU(uint8_t * data, size_t dataSize, uint32_t * letterCounts, int ascii_a)
{
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < dataSize; i++)
{
letterCounts[data[i] - ascii_a]++;
}
auto stop = std::chrono::high_resolution_clock::now();
return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
// Display letter counts
void displayResults(uint32_t * letterCounts)
{
printf("\n\n");
for (size_t i = 0; i < NUM_CHARS; i++)
{
printf("Found %u %c's\n", letterCounts[i], (char)(i + 0x61));
}
printf("\n\n");
}
// Display and analyze the run times (shared vs. global vs. CPU)
void displayTimingResults(uint64_t gpuSharedDuration, uint64_t gpuGlobalDuration, uint64_t cpuDuration)
{
printf("Took %lluns to run processing on GPU with shared memory\n", (unsigned long long)gpuSharedDuration);
printf("Took %lluns to run processing on GPU with global memory\n", (unsigned long long)gpuGlobalDuration);
printf("Took %lluns to run on CPU\n", (unsigned long long)cpuDuration);
printf("\n");
printf("Shared Memory runs %fx faster than global memory\n", ((double)gpuGlobalDuration) / gpuSharedDuration);
printf("Shared Memory on GPU runs %fx faster than the CPU\n", ((double)cpuDuration) / gpuSharedDuration);
printf("\n");
}
int main(int argc, char* argv[])
{
// Read command line args
std::string fileName = "all_letter.shakespeare.txt";
if (argc > 1) {
fileName = argv[1];
}
// Copy from host to constant memory
const int ascii_a = 0x61;
cudaMemcpyToSymbol(shiftAmount, &ascii_a, sizeof(int)); // shiftAmount and ascii_a are both int, so copy the full int
// Declare some variables
uint32_t letterCounts[NUM_CHARS];
size_t bytesRead;
size_t paddingBytes;
// Read file
uint8_t * data = readFile(fileName.c_str(), &bytesRead, &paddingBytes);
// Calculate run-time parameters
size_t dataSize = bytesRead + paddingBytes;
size_t textChunkSize = dataSize / NUM_THREADS;
printf("Bytes read: %zu\n", bytesRead);
printf("Padding bytes: %zu\n", paddingBytes);
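// Stage the file in page-locked (pinned) host memory so host-to-device copies can use faster DMA transfers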
uint8_t *pinnedData;
cudaMallocHost((void**)&pinnedData, dataSize);
memcpy(pinnedData, data, dataSize);
// Run letter counter on the CPU
memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
uint64_t cpuDuration = countWithCPU(pinnedData, dataSize, letterCounts, ascii_a);
// Run letter counter on the GPU with global memory
memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
uint64_t gpuGlobalDuration = countWithGPUGlobal(pinnedData, dataSize, letterCounts, textChunkSize);
// Run letter counter on the GPU with shared memory
memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
uint64_t gpuSharedDuration = countWithGPUShared(pinnedData, dataSize, letterCounts, textChunkSize);
unpadResult(letterCounts, paddingBytes);
// Display letter counts and timing
displayResults(letterCounts);
displayTimingResults(gpuSharedDuration, gpuGlobalDuration, cpuDuration);
return EXIT_SUCCESS;
}
|
84295e9f56276b7e78d461c8207c2e1e8f8e906c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
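// update_e: advance the E field from the curl of H; the k-direction (fastest index) neighbours of H are staged in shared memory with a one-cell halo at tk+1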
__global__ void update_e( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
int i = idx/Nyz;
int j = ( idx - i*Nyz )/Nz;
int k = idx - i*Nyz - j*Nz;
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
float* hz = (float*) &hy[blockDim.x+1];
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
hx[tk] = Hx[idx];
hy[tk] = Hy[idx];
hz[tk] = Hz[idx];
if ( tk==blockDim.x-1 ) {
hx[tk+1] = Hx[idx+1];
hy[tk+1] = Hy[idx+1];
}
}
__syncthreads();
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[tk+1] - hx[tk] - Hz[idx+Nyz] + hz[tk] );
if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - hy[tk] - Hx[idx+Nz] + hx[tk] );
}
}
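// update_h: advance the H field from the curl of E; E values along k are staged in shared memory with a one-cell halo at index 0 (loaded from idx-1)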
__global__ void update_h( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
int i = idx/Nyz;
int j = ( idx - i*Nyz )/Nz;
int k = idx - i*Nyz - j*Nz;
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
float* ez = (float*) &ey[blockDim.x+1];
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
ex[tk+1] = Ex[idx];
ey[tk+1] = Ey[idx];
ez[tk] = Ez[idx];
if ( tk==0 ) {
ex[0] = Ex[idx-1];
ey[0] = Ey[idx-1];
}
}
__syncthreads();
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
Hx[idx] -= 0.5*( ez[tk] - Ez[idx-Nz] - ey[tk+1] + ey[tk] );
Hy[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + Ez[idx-Nyz] );
Hz[idx] -= 0.5*( ey[tk+1] - Ey[idx-Nyz] - ex[tk+1] + Ex[idx-Nz] );
}
}
| 84295e9f56276b7e78d461c8207c2e1e8f8e906c.cu | __global__ void update_e( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
int i = idx/Nyz;
int j = ( idx - i*Nyz )/Nz;
int k = idx - i*Nyz - j*Nz;
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
float* hz = (float*) &hy[blockDim.x+1];
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
hx[tk] = Hx[idx];
hy[tk] = Hy[idx];
hz[tk] = Hz[idx];
if ( tk==blockDim.x-1 ) {
hx[tk+1] = Hx[idx+1];
hy[tk+1] = Hy[idx+1];
}
}
__syncthreads();
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[tk+1] - hx[tk] - Hz[idx+Nyz] + hz[tk] );
if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - hy[tk] - Hx[idx+Nz] + hx[tk] );
}
}
__global__ void update_h( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
int i = idx/Nyz;
int j = ( idx - i*Nyz )/Nz;
int k = idx - i*Nyz - j*Nz;
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
float* ez = (float*) &ey[blockDim.x+1];
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
ex[tk+1] = Ex[idx];
ey[tk+1] = Ey[idx];
ez[tk] = Ez[idx];
if ( tk==0 ) {
ex[0] = Ex[idx-1];
ey[0] = Ey[idx-1];
}
}
__syncthreads();
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
Hx[idx] -= 0.5*( ez[tk] - Ez[idx-Nz] - ey[tk+1] + ey[tk] );
Hy[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + Ez[idx-Nyz] );
Hz[idx] -= 0.5*( ey[tk+1] - Ey[idx-Nyz] - ex[tk+1] + Ex[idx-Nz] );
}
}
|
026fd3da25d8ffe4d499dcd1aa6b67b307f018b2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduceSum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *idata = NULL;
hipMalloc(&idata, XSIZE*YSIZE);
int *odata = NULL;
hipMalloc(&odata, XSIZE*YSIZE);
unsigned int ncols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((reduceSum), dim3(gridBlock), dim3(threadBlock), 0, 0, idata, odata, ncols);
hipDeviceSynchronize();
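// 10 untimed warm-up launches, then 1000 launches timed with steady_clock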
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((reduceSum), dim3(gridBlock), dim3(threadBlock), 0, 0, idata, odata, ncols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((reduceSum), dim3(gridBlock), dim3(threadBlock), 0, 0, idata, odata, ncols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 026fd3da25d8ffe4d499dcd1aa6b67b307f018b2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduceSum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *idata = NULL;
cudaMalloc(&idata, XSIZE*YSIZE);
int *odata = NULL;
cudaMalloc(&odata, XSIZE*YSIZE);
unsigned int ncols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduceSum<<<gridBlock,threadBlock>>>(idata,odata,ncols);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduceSum<<<gridBlock,threadBlock>>>(idata,odata,ncols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduceSum<<<gridBlock,threadBlock>>>(idata,odata,ncols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
728a3896d8febf5405b752edad0e1a2806064e6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __CUDNN__
#include "AdamOptimizer.h"
// template class AdamOptimizer<int>;
template class AdamOptimizer<float>;
// template class AdamOptimizer<double>;
/*!
@brief Kernel function that updates the parameter values.
@details Called and executed from UpdateParameterOnGPU.
@details Operates on blocks and threads laid out in one dimension.
@param pDevWeight GPU data of the parameter to update.
@param pDevAccGradient Gradient of the parameter to update.
@param weightDim Dimension of the parameter to update.
@param signed_learning_rate Learning rate of the Optimizer.
@param beta1 Weighting factor for the first momentum.
@param beta2 Weighting factor for the first velocity.
@param epsilon Value that keeps the denominator from becoming 0.
@param weightDecayRate Value that penalizes large weight parameters.
@param pDevFirstMomentum pDevFirstMomentum to be updated.
@param pDevFirstVelocity pDevFirstVelocity to be updated.
@see int AdamOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pFirstMomentum, Tensor<DTYPE> *pFirstVelocity, Tensor<DTYPE> *pUnbiasedMomentum, Tensor<DTYPE> *pUnbiasedVelocity)
*/
__global__ void AdamUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float beta1, float beta2, float epsilon, float weightDecayRate, float *pDevFirstMomentum, float *pDevFirstVelocity) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
pDevFirstMomentum[idx] = beta1 * pDevFirstMomentum[idx] + (1.F - beta1) * g; // m (1st moment)
pDevFirstVelocity[idx] = beta2 * pDevFirstVelocity[idx] + (1.F - beta2) * g * g; // v (2nd moment)
float m2 = pDevFirstMomentum[idx] / (1.F - beta1);
float v2 = pDevFirstVelocity[idx] / (1.F - beta2);
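// Apply the weight-decay term, then step the weight by the Adam update built from the bias-corrected moments m2 and v2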
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += signed_learning_rate / sqrt(v2 + epsilon) * m2;
pDevAccGradient[idx] = 0.F;
}
}
/*!
@brief GPU-side UpdateParameterOnGPU for AdamOptimizer.
@details Creates the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation
@details threadsPerBlock is the number of threads created per block
@details m_parameterDim is the dimension of the parameter to update
@details m_pDevData, m_pDevGrad, m_pDevGradientSquared are the GPU data used in the GPU computation; each is obtained from its CPU data by calling GetGPUData()
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Calls the AdamUpdate_kernel kernel function, passing the kernel name, number of blocks, threads per block and the GPU data in the form shown below.
@see __global__ void AdamUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float beta1, float beta2, float epsilon, float weightDecayRate, float *pDevFirstMomentum, float *pDevFirstVelocity)
@param *pParameter Operator pointer that holds the Tensor to update
@param pFirstMomentum pFirstMomentum to update
@param pFirstVelocity pFirstVelocity to update
@param pUnbiasedMomentum pUnbiasedMomentum to update
@param pUnbiasedVelocity pUnbiasedVelocity to update
@return TRUE on success
*/
template<typename DTYPE> int AdamOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pFirstMomentum, Tensor<DTYPE> *pFirstVelocity, Tensor<DTYPE> *pUnbiasedMomentum, Tensor<DTYPE> *pUnbiasedVelocity) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
// GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevFirstMomentum = pFirstMomentum->GetGPUData();
DTYPE *m_pDevFirstVelocity = pFirstVelocity->GetGPUData();
AdamUpdate_kernel << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_Beta1, m_Beta2, m_epsilon, weightDecayRate, m_pDevFirstMomentum, m_pDevFirstVelocity);
return TRUE;
}
#endif // ifdef __CUDNN__
| 728a3896d8febf5405b752edad0e1a2806064e6f.cu | #ifdef __CUDNN__
#include "AdamOptimizer.h"
// template class AdamOptimizer<int>;
template class AdamOptimizer<float>;
// template class AdamOptimizer<double>;
/*!
@brief Kernel function that updates the parameter values.
@details Called and executed from UpdateParameterOnGPU.
@details Operates on blocks and threads laid out in one dimension.
@param pDevWeight GPU data of the parameter to update.
@param pDevAccGradient Gradient of the parameter to update.
@param weightDim Dimension of the parameter to update.
@param signed_learning_rate Learning rate of the Optimizer.
@param beta1 Weighting factor for the first momentum.
@param beta2 Weighting factor for the first velocity.
@param epsilon Value that keeps the denominator from becoming 0.
@param weightDecayRate Value that penalizes large weight parameters.
@param pDevFirstMomentum pDevFirstMomentum to be updated.
@param pDevFirstVelocity pDevFirstVelocity to be updated.
@see int AdamOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pFirstMomentum, Tensor<DTYPE> *pFirstVelocity, Tensor<DTYPE> *pUnbiasedMomentum, Tensor<DTYPE> *pUnbiasedVelocity)
*/
__global__ void AdamUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float beta1, float beta2, float epsilon, float weightDecayRate, float *pDevFirstMomentum, float *pDevFirstVelocity) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
pDevFirstMomentum[idx] = beta1 * pDevFirstMomentum[idx] + (1.F - beta1) * g; // m (1st moment)
pDevFirstVelocity[idx] = beta2 * pDevFirstVelocity[idx] + (1.F - beta2) * g * g; // v (2nd moment)
float m2 = pDevFirstMomentum[idx] / (1.F - beta1);
float v2 = pDevFirstVelocity[idx] / (1.F - beta2);
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += signed_learning_rate / sqrt(v2 + epsilon) * m2;
pDevAccGradient[idx] = 0.F;
}
}
/*!
@brief GPU-side UpdateParameterOnGPU for AdamOptimizer.
@details Creates the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation
@details threadsPerBlock is the number of threads created per block
@details m_parameterDim is the dimension of the parameter to update
@details m_pDevData, m_pDevGrad, m_pDevGradientSquared are the GPU data used in the GPU computation; each is obtained from its CPU data by calling GetGPUData()
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Calls the AdamUpdate_kernel kernel function, passing the kernel name, number of blocks, threads per block and the GPU data in the form shown below.
@see __global__ void AdamUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float beta1, float beta2, float epsilon, float weightDecayRate, float *pDevFirstMomentum, float *pDevFirstVelocity)
@param *pParameter Operator pointer that holds the Tensor to update
@param pFirstMomentum pFirstMomentum to update
@param pFirstVelocity pFirstVelocity to update
@param pUnbiasedMomentum pUnbiasedMomentum to update
@param pUnbiasedVelocity pUnbiasedVelocity to update
@return TRUE on success
*/
template<typename DTYPE> int AdamOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pFirstMomentum, Tensor<DTYPE> *pFirstVelocity, Tensor<DTYPE> *pUnbiasedMomentum, Tensor<DTYPE> *pUnbiasedVelocity) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
// GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevFirstMomentum = pFirstMomentum->GetGPUData();
DTYPE *m_pDevFirstVelocity = pFirstVelocity->GetGPUData();
AdamUpdate_kernel << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_Beta1, m_Beta2, m_epsilon, weightDecayRate, m_pDevFirstMomentum, m_pDevFirstVelocity);
return TRUE;
}
#endif // ifdef __CUDNN__
|
6d510374ca2c7aa3370e568554a01526692d9ee0.hip | // !!! This is a file automatically generated by hipify!!!
// License Summary: MIT see LICENSE file
#include "al2o3_platform/platform.h"
#include "al2o3_memory/memory.h"
#include "accelcuda.h"
#include <hip/hip_runtime.h>
#include <hip/hiprtc.h>
struct _AccelCUDA_Cuda {
int deviceIndex;
hipDevice_t device;
hipCtx_t context;
};
struct _AccelCUDA_Function {
hipFunction_t function;
size_t sharedMemBytes;
uint64_t blockDimX;
uint64_t blockDimY;
uint64_t blockDimZ;
hipFuncCache cacheConfig;
};
static_assert(sizeof(uintptr_t) == sizeof(hipStream_t), "sizeof hipStream_t error");
static_assert(sizeof(uintptr_t) == sizeof(hipDeviceptr_t), "sizeof hipDeviceptr_t error");
static_assert(sizeof(uintptr_t) == sizeof(hipModule_t), "sizeof CUModule error");
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default use the previous one
// to run properly
LOGINFO("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char *_ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char *name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
// If we don't find the values, we default use the previous one
// to run properly
LOGINFO("MapSMtoArchName for SM %d.%d is undefined. Default to use %s\n",
major,
minor,
nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
void checkCUDA(hipError_t result, char const *const func, const char *const file,
int const line) {
if (result) {
LOGERROR("CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), hipGetErrorName(result), func);
}
}
void errCheck(hipError_t result, char const *const func, const char *const file,
int const line) {
if (result) {
char const *str = nullptr;
hipGetErrorString(result, &str);
LOGERROR("CU driver error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), str, func);
}
}
void errCheck(hiprtcResult result, char const *const func, const char *const file,
int const line) {
if (result) {
LOGERROR("NVRTC error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), hiprtcGetErrorString(result), func);
}
}
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) checkCUDA((val), #val, __FILE__, __LINE__)
#define checkErrors(val) errCheck((val), #val, __FILE__, __LINE__)
AL2O3_EXTERN_C AccelCUDA_Cuda AccelCUDA_Create() {
hipInit(0);
int deviceCount;
int pickedDeviceIndex = -1;
int pickedTotalCores = -1;
checkErrors(hipGetDeviceCount(&deviceCount));
LOGINFO("--- CUDA Devices ---");
for (int i = 0u; i < deviceCount; ++i) {
hipDevice_t currentDevice;
checkErrors( hipDeviceGet(¤tDevice, i));
int devProps[hipDeviceAttributeMax] {};
for(int j=1;j < hipDeviceAttributeMax;++j) {
checkErrors(hipDeviceGetAttribute(devProps + j, (hipDeviceAttribute_t)j, currentDevice));
}
int version[2] = {
devProps[hipDeviceAttributeComputeCapabilityMajor],
devProps[hipDeviceAttributeComputeCapabilityMinor],
};
int const coresPerSM = _ConvertSMVer2Cores(version[0], version[1]);
int const totalCores = coresPerSM * devProps[hipDeviceAttributeMultiprocessorCount];
int const computePerf = totalCores * (devProps[hipDeviceAttributeClockRate]/1024);
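// computePerf ~ totalCores * clock in MHz (the clock-rate attribute is in kHz); the ~GFLOPs figure below multiplies by 2, counting an FMA as two FLOPs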
char name[2048];
hipDeviceGetName(name, 2048, currentDevice);
LOGINFO("%d: %s %s (%d.%d)", i,
name, _ConvertSMVer2ArchName(version[0], version[1]), version[0], version[1]);
LOGINFO("%d: SMs %d, Cores %d, Total Cores %d Clock %d ~GFLOPs %f", i,
devProps[hipDeviceAttributeMultiprocessorCount], coresPerSM, totalCores, devProps[hipDeviceAttributeClockRate]/1024/1024, ((float)2 * computePerf)/1024.0f);
// for now just pick the biggest new enough device
if (totalCores > pickedTotalCores) {
pickedDeviceIndex = i;
pickedTotalCores = totalCores;
}
}
LOGINFO("---");
int nvrtcMajor = 0;
int nvrtcMinor = 0;
checkErrors(hiprtcVersion( &nvrtcMajor, &nvrtcMinor));
LOGINFO("NVRTC V %i.%i", nvrtcMajor, nvrtcMinor);
if (pickedDeviceIndex == -1) {
return nullptr;
}
_AccelCUDA_Cuda* cuda = (_AccelCUDA_Cuda*)MEMORY_CALLOC(1, sizeof(_AccelCUDA_Cuda));
if(!cuda) return nullptr;
cuda->deviceIndex = pickedDeviceIndex;
checkErrors(hipDeviceGet(&cuda->device, pickedDeviceIndex));
checkErrors(hipDevicePrimaryCtxRetain(&cuda->context, cuda->device));
checkErrors(hipCtxSetCurrent(cuda->context));
return cuda;
}
AL2O3_EXTERN_C void AccelCUDA_Destroy(AccelCUDA_Cuda cuda) {
if(!cuda) return;
checkErrors(hipCtxSynchronize());
checkErrors(hipDevicePrimaryCtxRelease(cuda->device));
MEMORY_FREE(cuda);
}
AL2O3_EXTERN_C AccelCUDA_Stream AccelCUDA_StreamCreate(AccelCUDA_Cuda cuda) {
hipStream_t stream;
checkErrors(hipStreamCreate__(&stream, hipStreamDefault));
return (AccelCUDA_Stream)stream;
}
AL2O3_EXTERN_C void AccelCUDA_StreamDestroy(AccelCUDA_Cuda cuda, AccelCUDA_Stream stream) {
checkErrors(hipStreamDestroy((hipStream_t)stream));
}
AL2O3_EXTERN_C AccelCUDA_Module AccelCUDA_ModuleCreateFromPTX(AccelCUDA_Cuda cuda, char const *ptx) {
hipModule_t module;
checkErrors( hipModuleLoadData(&module, ptx));
return (AccelCUDA_Module)module;
}
AL2O3_EXTERN_C void AccelCUDA_ModuleDestroy(AccelCUDA_Cuda cuda, AccelCUDA_Module module) {
checkErrors(hipModuleUnload((hipModule_t)module));
}
AL2O3_EXTERN_C AccelCUDA_Function AccelCUDA_FunctionCreate(AccelCUDA_Cuda cuda, AccelCUDA_Module module, char const *name) {
_AccelCUDA_Function* func = (_AccelCUDA_Function*)MEMORY_CALLOC(1, sizeof(_AccelCUDA_Function));
if(!func) return (AccelCUDA_Function)nullptr;
hipFunction_t function;
checkErrors(hipModuleGetFunction(&function, (hipModule_t)module, name));
func->function = function;
AccelCUDA_FunctionSetCacheConfig(func, ACCC_PREFER_L1);
return func;
}
AL2O3_EXTERN_C void AccelCUDA_FunctionDestroy(AccelCUDA_Cuda cuda, AccelCUDA_Function func) {
MEMORY_FREE(func);
}
AL2O3_EXTERN_C bool AccelCUDA_StreamIsIdle(AccelCUDA_Stream stream) {
return hipStreamQuery((hipStream_t)stream) == hipSuccess;
}
AL2O3_EXTERN_C void AccelCUDA_StreamSynchronize(AccelCUDA_Stream stream) {
checkErrors(hipStreamSynchronize((hipStream_t)stream));
}
AL2O3_EXTERN_C AccelCUDA_DeviceMemory AccelCUDA_ModuleGetGlobal(AccelCUDA_Module module, char const *name) {
hipDeviceptr_t memory;
checkErrors(hipModuleGetGlobal(&memory, nullptr, (hipModule_t)module, name));
return (AccelCUDA_DeviceMemory)memory;
}
AL2O3_EXTERN_C size_t AccelCUDA_ModuleGetGlobalSize(AccelCUDA_Module module, char const *name) {
size_t size;
checkErrors(hipModuleGetGlobal(nullptr, &size, (hipModule_t)module, name));
return size;
}
AL2O3_EXTERN_C AccelCUDA_DeviceMemory AccelCUDA_DeviceMalloc(AccelCUDA_Cuda cuda, uint64_t size) {
hipDeviceptr_t dptr;
checkErrors(cuMemAlloc(&dptr, size));
return (AccelCUDA_DeviceMemory)dptr;
}
AL2O3_EXTERN_C AccelCUDA_DevicePitchedMemory AccelCUDA_DeviceMalloc2D(AccelCUDA_Cuda cuda, uint64_t width, uint64_t height) {
_AccelCUDA_DevicePitchedMemory * dptr = (_AccelCUDA_DevicePitchedMemory *) MEMORY_CALLOC(1, sizeof(_AccelCUDA_DevicePitchedMemory));
checkErrors(hipMemAllocPitch__((hipDeviceptr_t*)&dptr->ptr, &dptr->pitch, width, height, 4));
return dptr;
}
AL2O3_EXTERN_C void AccelCUDA_FreeDeviceMemory(AccelCUDA_Cuda cuda, AccelCUDA_DeviceMemory memory) {
checkErrors(hipFree((hipDeviceptr_t)memory));
}
AL2O3_EXTERN_C void AccelCUDA_FreeDeviceMemory2D(AccelCUDA_Cuda cuda, AccelCUDA_DevicePitchedMemory memory) {
checkErrors(hipFree((hipDeviceptr_t)memory->ptr));
MEMORY_FREE((void*)memory);
}
AL2O3_EXTERN_C void* AccelCUDA_HostMalloc(AccelCUDA_Cuda cuda, size_t size) {
void* dptr = nullptr;
checkErrors(hipMemAllocHost(&dptr, size));
return dptr;
}
AL2O3_EXTERN_C void AccelCUDA_FreeHostMemory(AccelCUDA_Cuda cuda, void* memory) {
checkErrors(hipHostFree(memory));
}
AL2O3_EXTERN_C void AccelCUDA_CopyHostToDevice(AccelCUDA_Stream stream, void const* hostMemory, AccelCUDA_DeviceMemory devMemory, size_t bytes) {
checkErrors(cuMemcpyHtoDAsync((hipDeviceptr_t)devMemory, hostMemory, bytes, (hipStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_CopyDeviceToHost(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory devMemory, void * hostMem, size_t bytes) {
checkErrors(cuMemcpyDtoHAsync(hostMem, (hipDeviceptr_t)devMemory, bytes, (hipStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_CopyDeviceToDevice(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory srcDevMemory, AccelCUDA_DeviceMemory dstDevMemory, size_t bytes) {
checkErrors(cuMemcpyDtoDAsync((hipDeviceptr_t)dstDevMemory, (hipDeviceptr_t)srcDevMemory, bytes, (hipStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToUInt8(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, uint8_t val) {
checkErrors(hipMemsetD8Async((hipDeviceptr_t)memory, val, size, (hipStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToUInt16(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, uint16_t val) {
checkErrors(hipMemsetD16Async((hipDeviceptr_t)memory, val, size, (hipStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToUInt32(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, uint32_t val) {
checkErrors(hipMemsetAsync((hipDeviceptr_t)memory, val, size, (hipStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToFloat(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, float val) {
union { float f; uint32_t i; } fv;
fv.f = val;
checkErrors(hipMemsetAsync((hipDeviceptr_t)memory, fv.i, size, (hipStream_t)stream));
}
AL2O3_EXTERN_C size_t AccelCUDA_GetSizeOfDeviceMemory(AccelCUDA_DeviceMemory memory) {
size_t size = 0;
checkErrors(hipMemGetAddressRange(nullptr, &size, memory));
return size;
}
AL2O3_EXTERN_C void AccelCUDA_StreamPointGlobalTo(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory src, AccelCUDA_DeviceMemory global) {
hipDeviceptr_t address;
checkErrors(hipMemGetAddressRange(&address, nullptr, (hipDeviceptr_t) src));
checkErrors(cuMemcpyHtoDAsync(global, &address, sizeof(hipDeviceptr_t), (hipStream_t) stream));
}
AL2O3_EXTERN_C void AccelCUDA_ModulePointGlobalTo(AccelCUDA_Stream stream, AccelCUDA_Module module, char const *name, AccelCUDA_DeviceMemory memory) {
hipDeviceptr_t buf;
hipDeviceptr_t address;
size_t bytes;
size_t psize;
checkErrors(hipModuleGetGlobal(&buf, &bytes, (hipModule_t) module, name));
checkErrors(hipMemGetAddressRange(&address, &psize, memory));
checkErrors(cuMemcpyHtoDAsync(buf, &address, bytes, (hipStream_t) stream));
}
AL2O3_EXTERN_C int AccelCUDA_FunctionGetAttribute(AccelCUDA_Function function, AccelCUDA_FunctionAttribute attribute) {
int value;
checkErrors(hipFuncGetAttribute(&value, (hipFuncAttribute_t)attribute, (hipFunction_t)function));
return value;
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetMaxDynamicSharedBytes(AccelCUDA_Function function, int size) {
checkErrors(cuFuncSetAttribute((hipFunction_t)function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, size));
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetPreferredSharedMemoryCarveOutHint(AccelCUDA_Function function, int size) {
checkErrors(cuFuncSetAttribute((hipFunction_t)function, CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT, size));
}
AL2O3_EXTERN_C AccelCUDA_CacheConfig AccelCUDA_FunctionGetCacheConfig(AccelCUDA_Function function) {
switch(function->cacheConfig) {
case hipFuncCachePreferShared:
return ACCC_PREFER_SHARED;
case hipFuncCachePreferL1:
return ACCC_PREFER_L1;
case hipFuncCachePreferEqual:
return ACCC_PREFER_EQUAL;
case hipFuncCachePreferNone:
default:
return ACCC_PREFER_L1;
}
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetCacheConfig(AccelCUDA_Function func, AccelCUDA_CacheConfig config) {
switch(config) {
case ACCC_PREFER_SHARED: func->cacheConfig = hipFuncCachePreferShared; break;
case ACCC_PREFER_L1: func->cacheConfig = hipFuncCachePreferL1; break;
case ACCC_PREFER_EQUAL: func->cacheConfig = hipFuncCachePreferEqual; break;
default: LOGERROR("Invalid Cache Config"); return;
}
checkErrors(hipFuncSetCacheConfig((hipFunction_t) func->function, func->cacheConfig));
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetDynamicSharedMemorySize(AccelCUDA_Function func, size_t size) {
func->sharedMemBytes = size;
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetBlockDims(AccelCUDA_Function func, uint64_t x, uint64_t y, uint64_t z) {
func->blockDimX = x;
func->blockDimY = y;
func->blockDimZ = z;
}
AL2O3_EXTERN_C uint32_t AccelCUDA_FunctionGetMaxActiveBlocksPerMultiprocessor(AccelCUDA_Function func) {
int numBlocks = 0;
int totalBlockSize = (int)(func->blockDimX * func->blockDimY * func->blockDimZ);
hipModuleOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, func->function, totalBlockSize, func->sharedMemBytes);
return (uint32_t) numBlocks;
}
AL2O3_EXTERN_C void AccelCUDA_StreamLaunchCoopFunction(AccelCUDA_Stream stream, AccelCUDA_Function func,
uint64_t gridDimX, uint64_t gridDimY, uint64_t gridDimZ) {
ASSERT(func->blockDimX != 0);
ASSERT(func->blockDimY != 0);
ASSERT(func->blockDimZ != 0);
checkErrors(hipModuleLaunchCooperativeKernel((hipFunction_t)func->function,
(unsigned int)gridDimX, (unsigned int)gridDimY, (unsigned int)gridDimZ,
(unsigned int)func->blockDimX, (unsigned int)func->blockDimY, (unsigned int)func->blockDimZ,
(unsigned int)func->sharedMemBytes,
(hipStream_t)stream, nullptr));
}
AL2O3_EXTERN_C void AccelCUDA_StreamHostCallback(AccelCUDA_Stream stream, AccelCUDA_HostCallback callback, void* userData) {
checkErrors(cuLaunchHostFunc((hipStream_t)stream, callback, userData));
}
AL2O3_EXTERN_C void AccelCUDA_StreamLaunchFunction(AccelCUDA_Stream stream, AccelCUDA_Function func,
uint64_t gridDimX, uint64_t gridDimY, uint64_t gridDimZ) {
ASSERT(func->blockDimX != 0);
ASSERT(func->blockDimY != 0);
ASSERT(func->blockDimZ != 0);
checkErrors(hipModuleLaunchKernel((hipFunction_t)func->function,
(unsigned int)gridDimX, (unsigned int)gridDimY, (unsigned int)gridDimZ,
(unsigned int)func->blockDimX, (unsigned int)func->blockDimY, (unsigned int)func->blockDimZ,
(unsigned int)func->sharedMemBytes,
(hipStream_t)stream, nullptr, nullptr));
}
| 6d510374ca2c7aa3370e568554a01526692d9ee0.cu | // License Summary: MIT see LICENSE file
#include "al2o3_platform/platform.h"
#include "al2o3_memory/memory.h"
#include "accelcuda.h"
#include <cuda.h>
#include <nvrtc.h>
struct _AccelCUDA_Cuda {
int deviceIndex;
CUdevice device;
CUcontext context;
};
struct _AccelCUDA_Function {
CUfunction function;
size_t sharedMemBytes;
uint64_t blockDimX;
uint64_t blockDimY;
uint64_t blockDimZ;
CUfunc_cache cacheConfig;
};
static_assert(sizeof(uintptr_t) == sizeof(CUstream), "sizeof CUstream error");
static_assert(sizeof(uintptr_t) == sizeof(CUdeviceptr), "sizeof CUdeviceptr error");
static_assert(sizeof(uintptr_t) == sizeof(CUmodule), "sizeof CUModule error");
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default use the previous one
// to run properly
LOGINFO("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char *_ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct {
int SM; // 0xMm (hexidecimal notation), M = SM Major version,
// and m = SM minor version
const char *name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
// If we don't find the values, we default use the previous one
// to run properly
LOGINFO("MapSMtoArchName for SM %d.%d is undefined. Default to use %s\n",
major,
minor,
nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
void checkCUDA(cudaError_t result, char const *const func, const char *const file,
int const line) {
if (result) {
LOGERROR("CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), cudaGetErrorName(result), func);
}
}
void errCheck(CUresult result, char const *const func, const char *const file,
int const line) {
if (result) {
char const *str = nullptr;
cuGetErrorString(result, &str);
LOGERROR("CU driver error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), str, func);
}
}
void errCheck(nvrtcResult result, char const *const func, const char *const file,
int const line) {
if (result) {
LOGERROR("NVRTC error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), nvrtcGetErrorString(result), func);
}
}
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) checkCUDA((val), #val, __FILE__, __LINE__)
#define checkErrors(val) errCheck((val), #val, __FILE__, __LINE__)
AL2O3_EXTERN_C AccelCUDA_Cuda AccelCUDA_Create() {
cuInit(0);
int deviceCount;
int pickedDeviceIndex = -1;
int pickedTotalCores = -1;
checkErrors(cuDeviceGetCount(&deviceCount));
LOGINFO("--- CUDA Devices ---");
for (int i = 0u; i < deviceCount; ++i) {
CUdevice currentDevice;
checkErrors( cuDeviceGet(¤tDevice, i));
int devProps[CU_DEVICE_ATTRIBUTE_MAX] {};
for(int j=1;j < CU_DEVICE_ATTRIBUTE_MAX;++j) {
checkErrors(cuDeviceGetAttribute(devProps + j, (CUdevice_attribute)j, currentDevice));
}
int version[2] = {
devProps[CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR],
devProps[CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR],
};
int const coresPerSM = _ConvertSMVer2Cores(version[0], version[1]);
int const totalCores = coresPerSM * devProps[CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT];
int const computePerf = totalCores * (devProps[CU_DEVICE_ATTRIBUTE_CLOCK_RATE]/1024);
char name[2048];
cuDeviceGetName(name, 2048, currentDevice);
LOGINFO("%d: %s %s (%d.%d)", i,
name, _ConvertSMVer2ArchName(version[0], version[1]), version[0], version[1]);
LOGINFO("%d: SMs %d, Cores %d, Total Cores %d Clock %d ~GFLOPs %f", i,
devProps[CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT], coresPerSM, totalCores, devProps[CU_DEVICE_ATTRIBUTE_CLOCK_RATE]/1024/1024, ((float)2 * computePerf)/1024.0f);
// for now just pick the biggest new enough device
if (totalCores > pickedTotalCores) {
pickedDeviceIndex = i;
pickedTotalCores = totalCores;
}
}
LOGINFO("---");
int nvrtcMajor = 0;
int nvrtcMinor = 0;
checkErrors(nvrtcVersion( &nvrtcMajor, &nvrtcMinor));
LOGINFO("NVRTC V %i.%i", nvrtcMajor, nvrtcMinor);
if (pickedDeviceIndex == -1) {
return nullptr;
}
_AccelCUDA_Cuda* cuda = (_AccelCUDA_Cuda*)MEMORY_CALLOC(1, sizeof(_AccelCUDA_Cuda));
if(!cuda) return nullptr;
cuda->deviceIndex = pickedDeviceIndex;
checkErrors(cuDeviceGet(&cuda->device, pickedDeviceIndex));
checkErrors(cuDevicePrimaryCtxRetain(&cuda->context, cuda->device));
checkErrors(cuCtxSetCurrent(cuda->context));
return cuda;
}
AL2O3_EXTERN_C void AccelCUDA_Destroy(AccelCUDA_Cuda cuda) {
if(!cuda) return;
checkErrors(cuCtxSynchronize());
checkErrors(cuDevicePrimaryCtxRelease(cuda->device));
MEMORY_FREE(cuda);
}
AL2O3_EXTERN_C AccelCUDA_Stream AccelCUDA_StreamCreate(AccelCUDA_Cuda cuda) {
CUstream stream;
checkErrors(cuStreamCreate(&stream, CU_STREAM_DEFAULT));
return (AccelCUDA_Stream)stream;
}
AL2O3_EXTERN_C void AccelCUDA_StreamDestroy(AccelCUDA_Cuda cuda, AccelCUDA_Stream stream) {
checkErrors(cuStreamDestroy((CUstream)stream));
}
AL2O3_EXTERN_C AccelCUDA_Module AccelCUDA_ModuleCreateFromPTX(AccelCUDA_Cuda cuda, char const *ptx) {
CUmodule module;
checkErrors( cuModuleLoadData(&module, ptx));
return (AccelCUDA_Module)module;
}
AL2O3_EXTERN_C void AccelCUDA_ModuleDestroy(AccelCUDA_Cuda cuda, AccelCUDA_Module module) {
checkErrors(cuModuleUnload((CUmodule)module));
}
AL2O3_EXTERN_C AccelCUDA_Function AccelCUDA_FunctionCreate(AccelCUDA_Cuda cuda, AccelCUDA_Module module, char const *name) {
_AccelCUDA_Function* func = (_AccelCUDA_Function*)MEMORY_CALLOC(1, sizeof(_AccelCUDA_Function));
if(!func) return (AccelCUDA_Function)nullptr;
CUfunction function;
checkErrors(cuModuleGetFunction(&function, (CUmodule)module, name));
func->function = function;
AccelCUDA_FunctionSetCacheConfig(func, ACCC_PREFER_L1);
return func;
}
AL2O3_EXTERN_C void AccelCUDA_FunctionDestroy(AccelCUDA_Cuda cuda, AccelCUDA_Function func) {
MEMORY_FREE(func);
}
AL2O3_EXTERN_C bool AccelCUDA_StreamIsIdle(AccelCUDA_Stream stream) {
return cuStreamQuery((CUstream)stream) == CUDA_SUCCESS;
}
AL2O3_EXTERN_C void AccelCUDA_StreamSynchronize(AccelCUDA_Stream stream) {
checkErrors(cuStreamSynchronize((CUstream)stream));
}
AL2O3_EXTERN_C AccelCUDA_DeviceMemory AccelCUDA_ModuleGetGlobal(AccelCUDA_Module module, char const *name) {
CUdeviceptr memory;
checkErrors(cuModuleGetGlobal(&memory, nullptr, (CUmodule)module, name));
return (AccelCUDA_DeviceMemory)memory;
}
AL2O3_EXTERN_C size_t AccelCUDA_ModuleGetGlobalSize(AccelCUDA_Module module, char const *name) {
size_t size;
checkErrors(cuModuleGetGlobal(nullptr, &size, (CUmodule)module, name));
return size;
}
AL2O3_EXTERN_C AccelCUDA_DeviceMemory AccelCUDA_DeviceMalloc(AccelCUDA_Cuda cuda, uint64_t size) {
CUdeviceptr dptr;
checkErrors(cuMemAlloc(&dptr, size));
return (AccelCUDA_DeviceMemory)dptr;
}
AL2O3_EXTERN_C AccelCUDA_DevicePitchedMemory AccelCUDA_DeviceMalloc2D(AccelCUDA_Cuda cuda, uint64_t width, uint64_t height) {
_AccelCUDA_DevicePitchedMemory * dptr = (_AccelCUDA_DevicePitchedMemory *) MEMORY_CALLOC(1, sizeof(_AccelCUDA_DevicePitchedMemory));
checkErrors(cuMemAllocPitch((CUdeviceptr*)&dptr->ptr, &dptr->pitch, width, height, 4));
return dptr;
}
AL2O3_EXTERN_C void AccelCUDA_FreeDeviceMemory(AccelCUDA_Cuda cuda, AccelCUDA_DeviceMemory memory) {
checkErrors(cuMemFree((CUdeviceptr)memory));
}
AL2O3_EXTERN_C void AccelCUDA_FreeDeviceMemory2D(AccelCUDA_Cuda cuda, AccelCUDA_DevicePitchedMemory memory) {
checkErrors(cuMemFree((CUdeviceptr)memory->ptr));
MEMORY_FREE((void*)memory);
}
AL2O3_EXTERN_C void* AccelCUDA_HostMalloc(AccelCUDA_Cuda cuda, size_t size) {
void* dptr = nullptr;
checkErrors(cuMemAllocHost(&dptr, size));
return dptr;
}
AL2O3_EXTERN_C void AccelCUDA_FreeHostMemory(AccelCUDA_Cuda cuda, void* memory) {
checkErrors(cuMemFreeHost(memory));
}
AL2O3_EXTERN_C void AccelCUDA_CopyHostToDevice(AccelCUDA_Stream stream, void const* hostMemory, AccelCUDA_DeviceMemory devMemory, size_t bytes) {
checkErrors(cuMemcpyHtoDAsync((CUdeviceptr)devMemory, hostMemory, bytes, (cudaStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_CopyDeviceToHost(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory devMemory, void * hostMem, size_t bytes) {
checkErrors(cuMemcpyDtoHAsync(hostMem, (CUdeviceptr)devMemory, bytes, (cudaStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_CopyDeviceToDevice(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory srcDevMemory, AccelCUDA_DeviceMemory dstDevMemory, size_t bytes) {
checkErrors(cuMemcpyDtoDAsync((CUdeviceptr)dstDevMemory, (CUdeviceptr)srcDevMemory, bytes, (cudaStream_t)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToUInt8(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, uint8_t val) {
checkErrors(cuMemsetD8Async((CUdeviceptr)memory, val, size, (CUstream)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToUInt16(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, uint16_t val) {
checkErrors(cuMemsetD16Async((CUdeviceptr)memory, val, size, (CUstream)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToUInt32(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, uint32_t val) {
checkErrors(cuMemsetD32Async((CUdeviceptr)memory, val, size, (CUstream)stream));
}
AL2O3_EXTERN_C void AccelCUDA_SetDeviceMemoryToFloat(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory memory, size_t size, float val) {
union { float f; uint32_t i; } fv;
fv.f = val;
checkErrors(cuMemsetD32Async((CUdeviceptr)memory, fv.i, size, (CUstream)stream));
}
AL2O3_EXTERN_C size_t AccelCUDA_GetSizeOfDeviceMemory(AccelCUDA_DeviceMemory memory) {
size_t size = 0;
checkErrors(cuMemGetAddressRange(nullptr, &size, memory));
return size;
}
AL2O3_EXTERN_C void AccelCUDA_StreamPointGlobalTo(AccelCUDA_Stream stream, AccelCUDA_DeviceMemory src, AccelCUDA_DeviceMemory global) {
CUdeviceptr address;
checkErrors(cuMemGetAddressRange(&address, nullptr, (CUdeviceptr) src));
checkErrors(cuMemcpyHtoDAsync(global, &address, sizeof(CUdeviceptr), (CUstream) stream));
}
AL2O3_EXTERN_C void AccelCUDA_ModulePointGlobalTo(AccelCUDA_Stream stream, AccelCUDA_Module module, char const *name, AccelCUDA_DeviceMemory memory) {
CUdeviceptr buf;
CUdeviceptr address;
size_t bytes;
size_t psize;
checkErrors(cuModuleGetGlobal(&buf, &bytes, (CUmodule) module, name));
checkErrors(cuMemGetAddressRange(&address, &psize, memory));
checkErrors(cuMemcpyHtoDAsync(buf, &address, bytes, (CUstream) stream));
}
AL2O3_EXTERN_C int AccelCUDA_FunctionGetAttribute(AccelCUDA_Function function, AccelCUDA_FunctionAttribute attribute) {
int value;
checkErrors(cuFuncGetAttribute(&value, (CUfunction_attribute)attribute, (CUfunction)function));
return value;
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetMaxDynamicSharedBytes(AccelCUDA_Function function, int size) {
checkErrors(cuFuncSetAttribute((CUfunction)function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, size));
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetPreferredSharedMemoryCarveOutHint(AccelCUDA_Function function, int size) {
checkErrors(cuFuncSetAttribute((CUfunction)function, CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT, size));
}
AL2O3_EXTERN_C AccelCUDA_CacheConfig AccelCUDA_FunctionGetCacheConfig(AccelCUDA_Function function) {
switch(function->cacheConfig) {
case CU_FUNC_CACHE_PREFER_SHARED:
return ACCC_PREFER_SHARED;
case CU_FUNC_CACHE_PREFER_L1:
return ACCC_PREFER_L1;
case CU_FUNC_CACHE_PREFER_EQUAL:
return ACCC_PREFER_EQUAL;
case CU_FUNC_CACHE_PREFER_NONE:
default:
return ACCC_PREFER_L1;
}
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetCacheConfig(AccelCUDA_Function func, AccelCUDA_CacheConfig config) {
switch(config) {
case ACCC_PREFER_SHARED: func->cacheConfig = CU_FUNC_CACHE_PREFER_SHARED; break;
case ACCC_PREFER_L1: func->cacheConfig = CU_FUNC_CACHE_PREFER_L1; break;
case ACCC_PREFER_EQUAL: func->cacheConfig = CU_FUNC_CACHE_PREFER_EQUAL; break;
default: LOGERROR("Invalid Cache Config"); return;
}
checkErrors(cuFuncSetCacheConfig((CUfunction) func->function, func->cacheConfig));
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetDynamicSharedMemorySize(AccelCUDA_Function func, size_t size) {
func->sharedMemBytes = size;
}
AL2O3_EXTERN_C void AccelCUDA_FunctionSetBlockDims(AccelCUDA_Function func, uint64_t x, uint64_t y, uint64_t z) {
func->blockDimX = x;
func->blockDimY = y;
func->blockDimZ = z;
}
AL2O3_EXTERN_C uint32_t AccelCUDA_FunctionGetMaxActiveBlocksPerMultiprocessor(AccelCUDA_Function func) {
int numBlocks = 0;
int totalBlockSize = (int)(func->blockDimX * func->blockDimY * func->blockDimZ);
cuOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, func->function, totalBlockSize, func->sharedMemBytes);
return (uint32_t) numBlocks;
}
AL2O3_EXTERN_C void AccelCUDA_StreamLaunchCoopFunction(AccelCUDA_Stream stream, AccelCUDA_Function func,
uint64_t gridDimX, uint64_t gridDimY, uint64_t gridDimZ) {
ASSERT(func->blockDimX != 0);
ASSERT(func->blockDimY != 0);
ASSERT(func->blockDimZ != 0);
checkErrors(cuLaunchCooperativeKernel((CUfunction)func->function,
(unsigned int)gridDimX, (unsigned int)gridDimY, (unsigned int)gridDimZ,
(unsigned int)func->blockDimX, (unsigned int)func->blockDimY, (unsigned int)func->blockDimZ,
(unsigned int)func->sharedMemBytes,
(CUstream)stream, nullptr));
}
AL2O3_EXTERN_C void AccelCUDA_StreamHostCallback(AccelCUDA_Stream stream, AccelCUDA_HostCallback callback, void* userData) {
checkErrors(cuLaunchHostFunc((CUstream)stream, callback, userData));
}
AL2O3_EXTERN_C void AccelCUDA_StreamLaunchFunction(AccelCUDA_Stream stream, AccelCUDA_Function func,
uint64_t gridDimX, uint64_t gridDimY, uint64_t gridDimZ) {
ASSERT(func->blockDimX != 0);
ASSERT(func->blockDimY != 0);
ASSERT(func->blockDimZ != 0);
checkErrors(cuLaunchKernel((CUfunction)func->function,
(unsigned int)gridDimX, (unsigned int)gridDimY, (unsigned int)gridDimZ,
(unsigned int)func->blockDimX, (unsigned int)func->blockDimY, (unsigned int)func->blockDimZ,
(unsigned int)func->sharedMemBytes,
(CUstream)stream, nullptr, nullptr));
}
|
45727178a929e96cdbb86d65d40280f00e748107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "internal.hpp"
#include "utils/boxutils.hpp"
#include<algorithm>
#include<limits>
using namespace pcl::gpu;
using namespace pcl::device;
namespace pcl
{
namespace device
{
__global__ void get_cc_kernel(int *data)
{
data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x;
}
}
}
void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx)
{
hipFuncAttributes attrs;
cudaSafeCall( hipFuncGetAttributes(&attrs, get_cc_kernel) );
bin = attrs.binaryVersion;
ptx = attrs.ptxVersion;
}
void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points)
{
points = input_points;
}
void pcl::device::OctreeImpl::internalDownload()
{
int number;
DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number);
DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs);
DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends);
DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes);
DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes);
points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step);
indices.download(host_octree.indices);
host_octree.downloaded = true;
}
namespace
{
int getBitsNum(int integer)
{
int count = 0;
while(integer > 0)
{
if (integer & 1)
++count;
integer>>=1;
}
return count;
}
struct OctreeIteratorHost
{
const static int MAX_LEVELS_PLUS_ROOT = 11;
int paths[MAX_LEVELS_PLUS_ROOT];
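// each entry packs (index of the first remaining node << 8) | number of remaining siblings at that level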
int level;
OctreeIteratorHost()
{
level = 0; // root level
paths[level] = (0 << 8) + 1;
}
void gotoNextLevel(int first, int len)
{
++level;
paths[level] = (first << 8) + len;
}
int operator*() const
{
return paths[level] >> 8;
}
void operator++()
{
while(level >= 0)
{
int data = paths[level];
if ((data & 0xFF) > 1) // there are another siblings, can goto there
{
data += (1 << 8) - 1; // +1 to first and -1 from len
paths[level] = data;
break;
}
else
--level; //goto parent;
}
}
};
}
void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, std::vector<int>& out, int max_nn) const
{
out.clear();
float3 center = make_float3(query.x, query.y, query.z);
OctreeIteratorHost iterator;
while(iterator.level >= 0)
{
int node_idx = *iterator;
int code = host_octree.codes[node_idx];
float3 node_minp = octreeGlobal.minp;
float3 node_maxp = octreeGlobal.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius))
{
++iterator;
continue;
}
//if true, take all, and go to next
if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius))
{
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
end = beg + std::min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end);
if (out.size() == (std::size_t)max_nn)
return;
++iterator;
continue;
}
// test children
int children_mask = host_octree.nodes[node_idx] & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
const int beg = host_octree.begs[node_idx];
const int end = host_octree.ends[node_idx];
for(int j = beg; j < end; ++j)
{
int index = host_octree.indices[j];
float point_x = host_octree.points_sorted[j ];
float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2];
float dx = (point_x - center.x);
float dy = (point_y - center.y);
float dz = (point_z - center.z);
float dist2 = dx * dx + dy * dy + dz * dz;
if (dist2 < radius * radius)
out.push_back(index);
if (out.size() == (std::size_t)max_nn)
return;
}
++iterator;
continue;
}
int first = host_octree.nodes[node_idx] >> 8;
iterator.gotoNextLevel(first, getBitsNum(children_mask));
}
}
void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const
{
float3 minp = octreeGlobal.minp;
float3 maxp = octreeGlobal.maxp;
int node_idx = 0;
bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z;
if(!out_of_root)
{
int code = CalcMorton(minp, maxp)(query);
int level = 0;
for(;;)
{
int mask_pos = 1 << Morton::extractLevelCode(code, level);
int node = host_octree.nodes[node_idx];
int mask = node & 0xFF;
if(getBitsNum(mask) == 0) // leaf
break;
if ( (mask & mask_pos) == 0) // no child
break;
node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
++level;
}
}
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
sqr_dist = std::numeric_limits<float>::max();
for(int i = beg; i < end; ++i)
{
float point_x = host_octree.points_sorted[i ];
float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2];
float dx = (point_x - query.x);
float dy = (point_y - query.y);
float dz = (point_z - query.z);
float d2 = dx * dx + dy * dy + dz * dz;
if (sqr_dist > d2)
{
sqr_dist = d2;
out_index = i;
}
}
out_index = host_octree.indices[out_index];
}
| 45727178a929e96cdbb86d65d40280f00e748107.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "internal.hpp"
#include "utils/boxutils.hpp"
#include<algorithm>
#include<limits>
using namespace pcl::gpu;
using namespace pcl::device;
namespace pcl
{
namespace device
{
__global__ void get_cc_kernel(int *data)
{
data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x;
}
}
}
void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx)
{
cudaFuncAttributes attrs;
cudaSafeCall( cudaFuncGetAttributes(&attrs, get_cc_kernel) );
bin = attrs.binaryVersion;
ptx = attrs.ptxVersion;
}
void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points)
{
points = input_points;
}
void pcl::device::OctreeImpl::internalDownload()
{
int number;
DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number);
DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs);
DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends);
DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes);
DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes);
points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step);
indices.download(host_octree.indices);
host_octree.downloaded = true;
}
namespace
{
int getBitsNum(int integer)
{
int count = 0;
while(integer > 0)
{
if (integer & 1)
++count;
integer>>=1;
}
return count;
}
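// getBitsNum is a plain population count over the 8-bit child mask; on
// GCC/Clang hosts __builtin_popcount(integer) computes the same value.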
struct OctreeIteratorHost
{
const static int MAX_LEVELS_PLUS_ROOT = 11;
int paths[MAX_LEVELS_PLUS_ROOT];
int level;
OctreeIteratorHost()
{
level = 0; // root level
paths[level] = (0 << 8) + 1;
}
void gotoNextLevel(int first, int len)
{
++level;
paths[level] = (first << 8) + len;
}
int operator*() const
{
return paths[level] >> 8;
}
void operator++()
{
while(level >= 0)
{
int data = paths[level];
if ((data & 0xFF) > 1) // there are another siblings, can goto there
{
data += (1 << 8) - 1; // +1 to first and -1 from len
paths[level] = data;
break;
}
else
--level; //goto parent;
}
}
};
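/*
 * paths[level] packs the current node index into the upper bits and the
 * number of still-unvisited siblings into the low 8 bits: (first << 8) + len.
 * For example first = 12, len = 3 is stored as 0x0C03; operator* yields node
 * 12, and operator++ advances it to 0x0D02, i.e. node 13 with 2 siblings left.
 */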
}
void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, std::vector<int>& out, int max_nn) const
{
out.clear();
float3 center = make_float3(query.x, query.y, query.z);
OctreeIteratorHost iterator;
while(iterator.level >= 0)
{
int node_idx = *iterator;
int code = host_octree.codes[node_idx];
float3 node_minp = octreeGlobal.minp;
float3 node_maxp = octreeGlobal.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius))
{
++iterator;
continue;
}
//if true, take all, and go to next
if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius))
{
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
end = beg + std::min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end);
if (out.size() == (std::size_t)max_nn)
return;
++iterator;
continue;
}
// test children
int children_mask = host_octree.nodes[node_idx] & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
const int beg = host_octree.begs[node_idx];
const int end = host_octree.ends[node_idx];
for(int j = beg; j < end; ++j)
{
int index = host_octree.indices[j];
float point_x = host_octree.points_sorted[j ];
float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2];
float dx = (point_x - center.x);
float dy = (point_y - center.y);
float dz = (point_z - center.z);
float dist2 = dx * dx + dy * dy + dz * dz;
if (dist2 < radius * radius)
out.push_back(index);
if (out.size() == (std::size_t)max_nn)
return;
}
++iterator;
continue;
}
int first = host_octree.nodes[node_idx] >> 8;
iterator.gotoNextLevel(first, getBitsNum(children_mask));
}
}
void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const
{
float3 minp = octreeGlobal.minp;
float3 maxp = octreeGlobal.maxp;
int node_idx = 0;
bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z;
if(!out_of_root)
{
int code = CalcMorton(minp, maxp)(query);
int level = 0;
for(;;)
{
int mask_pos = 1 << Morton::extractLevelCode(code, level);
int node = host_octree.nodes[node_idx];
int mask = node & 0xFF;
if(getBitsNum(mask) == 0) // leaf
break;
if ( (mask & mask_pos) == 0) // no child
break;
node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
++level;
}
}
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
sqr_dist = std::numeric_limits<float>::max();
for(int i = beg; i < end; ++i)
{
float point_x = host_octree.points_sorted[i ];
float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2];
float dx = (point_x - query.x);
float dy = (point_y - query.y);
float dz = (point_z - query.z);
float d2 = dx * dx + dy * dy + dz * dz;
if (sqr_dist > d2)
{
sqr_dist = d2;
out_index = i;
}
}
out_index = host_octree.indices[out_index];
}
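/*
 * The descent above walks the query's Morton code one level at a time: the
 * level code selects one of the eight children, mask_pos marks that child in
 * the node's 8-bit occupancy mask, and getBitsNum(mask & (mask_pos - 1))
 * counts the preceding siblings to locate the child's index. Distances are
 * then brute-forced only over the points of the node where the descent
 * stopped, which is why the result is approximate rather than exact.
 */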
|
655d45743b6c5e1067582720fe9e708391c305e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define DIVIDER 10000
__global__ void even(int *darr,int n)
{
int k=blockIdx.x*512+threadIdx.x;
int t;
k=k*2; //for even positions
if(k< n-1)
{
if(darr[k]>darr[k+1])
{ //swap the numbers
t=darr[k];
darr[k]=darr[k+1];
darr[k+1] =t;
}
}
}
__global__ void odd(int *darr,int n)
{
int k=blockIdx.x*512+threadIdx.x;
int t;
k=k*2 +1; //for odd positions
if(k< n-1)
{
if(darr[k]>darr[k+1])
{ //swap the numbers
t=darr[k];
darr[k]=darr[k+1];
darr[k+1] =t;
}
}
}
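/*
 * The two kernels above are the parallel form of odd-even transposition sort.
 * A sequential sketch of the same idea (illustrative only):
 *
 *   for (int phase = 0; phase < n; phase++)
 *       for (int k = phase % 2; k < n - 1; k += 2)
 *           if (arr[k] > arr[k+1]) { int t = arr[k]; arr[k] = arr[k+1]; arr[k+1] = t; }
 *
 * On the GPU each compare-exchange pair gets its own thread instead of the
 * inner loop over k.
 */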
int main()
{
int *arr,*darr;
int n,i;
time_t t;
srand((unsigned)time(&t));
printf("\n Enter how many numbers :");
scanf("%d",&n);
arr=(int *)malloc(n*sizeof(int)); //for dynamic inputs
for(i=0; i<n; i++)
{
arr[i] = (rand() % DIVIDER) + 1;
}
printf("\n UNSORTED ARRAY \n");
for(i=0; i<n; i++)
printf("\t%d",arr[i]);
hipMalloc(&darr,n*sizeof(int)); //memory allocation in GPU for darr
hipMemcpy(darr,arr ,n*sizeof(int) ,hipMemcpyHostToDevice); // data transfer from host to GPU
for(i=0;i<=n/2;i++)
{
hipLaunchKernelGGL(( even), dim3(n/1024+1),dim3(512), 0, 0, darr,n);
hipLaunchKernelGGL(( odd), dim3(n/1024+1),dim3(512), 0, 0, darr,n);
}
hipMemcpy(arr,darr,n*sizeof(int),hipMemcpyDeviceToHost);
printf("\n SORTED ARRAY \n");
for(i=0; i<n; i++)
printf("\t%d",arr[i]);
}
/*
*****************************output******************************************************
Enter how many numbers :1070
UNSORTED ARRAY
4332 4334 2422 4945 7974 1295 8969 555 6155 8599 5802 2480 7514
491 3373 8094 7801 2145 2270 9097 2843 7732 9978 4217 9980 8368
8960 4001 8449 2586 2838 9133 3272 1612 4077 1245 2906 3045
1799 9060 7995 3953 7892 5509 4443 7616 3602 8596 9760 2223
7692 2602 9954 7669 6818 6285 6037 5778 285 837 4715 9475 9969
7986 1086 4046 9231 343 3442 7381 5754 7789 7685 9997 9649 2128
7612 9602 723 7371 1824 4766 9973 8129 2434 6790 4413 4822 8919
1050 2011 9986 524 1979 7971 7961 2376 3553 4655 2170 7286 408
9958 4970 405 9606 3449 8016 9207 523 5387 7382 5288 1711 5510
4074 4852 6274 8895 123 7323 905 108 4198 9236 8078 2158 7963 7983
6812 132 5268 3572 6441 237 328 2398 38 8343 7956 560 81 5337 2200 1791
7198 6273 2995 3472 1519 3117 794 8776 3224 4992 8011 7653 3501
2325 5635 313 8809 7254 236 5249 7491 563 3999 3880 5257 1954 791 1690
3643 2990 9832 840 5614 2826 4311 7133 5942 1457 5908 9165 6448
270 3170 9948 2594 8804 6612 1402 6058 3199 3003 9900 3761 7001
131 5370 5306 921 7059 5300 3911 6890 6140 9524 9716 6802 3008 2009
8258 5267 7526 1057 5536 695 1005 8130 5850 3968 5883 8259 7167
8885 4510 7279 2237 4640 2648 3895 1913 9706 9194 5823 2948 1685
1698 9015 8487 1058 1023 3096 6324 8548 4153 1860 5594 1509 6341
1444 1828 8575 6054 8994 3812 564 6273 6048 1555 5272 6294 3467
4978 5488 5641 4277 7172 3691 3291 2010 1100 4313 5106 7423 9213
5610 5634 4806 3470 8326 2601 5297 6901 8655 4291 712 5570 6915
3111 3476 2186 9405 6943 3515 1244 8935 7791 8415 2625 1081 6777
76 1746 8234 7499 958 3843 9484 5763 7312 7810 8364 8960 4710 3370
9602 1773 8939 6516 4883 2414 5054 639 5708 4920 1882 4643 9063
6649 3619 6495 3425 3695 8240 1658 7545 9197 1852 7028 1312 5515
4837 9675 4474 5898 9396 4076 4022 4686 6943 5257 3451 8348 5895
9159 9620 4129 153 8682 777 123 1528 4201 169 9768 2210 4065 5316 4061
1093 6627 5927 2281 2653 400 4531 2048 827 8552 3085 7770 160 6536 2469
6055 5694 2088 183 2198 7121 7311 8672 8649 1511 8841 4768 72 2905
83 4132 349 3062 58 8982 2066 6809 3512 466 7636 8415 3550 1757 4927
85 4225 981 2130 2665 7515 679 9785 4825 9351 4785 2687 8191 5904 2758
7447 5987 3241 7796 5400 9650 3129 7465 2810 2992 4282 445 1406 7832
8553 6332 4268 2778 3664 6398 1794 7530 7076 7930 8706 6426 2715
7744 968 8618 6853 8415 956 6445 2562 2707 2446 5690 172 5256 8681 4453
2052 6438 8636 605 9122 2904 9734 9137 5653 1527 3019 9080 9456 8076
5506 8522 5820 2825 7140 2672 7591 4447 5469 6504 7154 7914 8545
3677 3169 3577 4481 1573 6367 3117 2177 5488 6020 1910 976 8024 9788
3994 7103 9243 2070 8960 7765 4241 1785 1256 3264 5727 5702 8732
2231 2855 2998 7127 2883 2518 704 7364 4090 7070 480 2618 8909 2851
4527 9884 874 666 230 7976 9909 8651 3288 4025 2891 5072 1632 6154 7150
7333 1238 9380 6540 4235 6507 9422 6752 3562 6785 7194 631 3616 9811
9539 6466 690 5774 3691 1355 6003 8019 7615 4653 1306 1639 3895 6377
3270 49 3526 6955 7638 9258 9846 8224 2116 9267 1327 5677 2404 8520
6307 6019 8331 2197 8837 5372 7970 2527 6726 3973 545 4341 4977 1850
2331 8872 4578 1953 5272 4456 5259 2909 65 5104 1132 2180 722 8810
4208 3125 7330 514 5496 2012 9062 684 7383 7031 3210 4108 7355 107 4800
8684 8308 7131 7555 9238 5435 9178 3693 693 8438 109 5796 5921 8640
6517 4730 2847 5994 2059 9712 7841 4070 8773 8524 7804 2155 8085
1912 9510 8191 3063 8193 2851 6545 2099 2088 1979 1276 2132 2671
9713 2240 4818 5633 879 1335 362 3725 3680 8773 9788 1520 9194 8560
6395 6998 714 4479 5261 6575 2670 8323 1119 5520 4868 9569 3959 3198
844 6090 5869 6908 4681 686 8892 5559 8372 5606 5635 2051 4378 5422
9922 9923 333 6316 6920 7398 795 2180 3973 9816 6855 1443 1687 8074
1012 5645 1271 8207 8086 3491 5115 2766 529 358 4676 8900 5963 6662
7303 6692 2083 7224 6615 8767 9892 3534 6164 7038 2066 6488 6853
5272 4283 8539 3345 5294 535 967 3500 8620 4458 4966 1385 1338 5324
2412 237 7638 9073 7539 4330 7507 1115 944 6273 7358 829 8788 4395 9246
1628 1247 4517 5910 6137 4213 7555 6671 1532 7406 1642 2341 2372
9378 3678 4047 1789 3914 1684 861 7805 6013 8367 5271 3308 991 2628
4137 9778 7022 3382 1405 4620 4251 3666 756 8463 1220 3778 9994 4978
1771 2334 7349 1148 6011 7747 2936 6277 5782 3796 4081 1795 8514
9351 1454 9504 8330 5590 5633 5351 5324 3390 9970 9574 7055 7077
8036 4627 7206 4382 9604 8976 3067 3304 6475 5430 7402 9410 1706
3183 9557 2138 1329 4422 1488 2783 3925 9817 8372 5909 1519 47 9298
7840 9620 2705 1268 4008 3683 4825 4741 9638 152 7807 9293 6626 3236
6694 6035 4941 6228 1943 7078 7557 6364 4917 339 6640 1085 5062 2548
8955 5109 8198 6794 1080 7254 4413 5087 936 9237 9827 573 9388 3986
9865 6013 7221 2910 8399 8514 9137 341 1943 6693 3056 6860 3383 9695
4296 8445 8595 3251 9905 3144 6396 984 397 809 2423 1332 45 8601 8256
5785 8938 4472 1797 6159 7381 196 1024 2869 6888 2966 5914 6296 6177
9296 2342 6825 4092 936 75 348 431 2822 7684 827 3630 106 2158 27 5058 6765
5811 3996 1236 3959 6506 4968 506 7529 7837 3746 6846 3750 41 3023
3045 2382 9847 3489 9670 6273 3836 6452 5446 1519 7279 5428 7976
5788 5454 3034 2553 7616 3381 3788 7926 9886 8756 8432 3766 2944
8529 611 6693 4921 9985 6089 7302 6183 9577 3323 8807 3413 9775 4253
1283 3405 9680 9259 9192 1485 2292 1744 5452 2024 1884 3377 8261
6991 8160 2026 9934 6688 8988 2978 1608 5325 9066 5262 7859 4995
4936
SORTED ARRAY
27 38 41 45 47 49 58 65 72 75 76 81 83 85 106 107 108 109 123 123 131 132 152 153 160 169 172
183 196 230 236 237 237 270 285 313 328 333 339 341 343 348 349 358 362 397 400 405 408 431 445 466 480 491
506 514 523 524 529 535 545 555 560 563 564 573 605 611 631 639 666 679 684 686 690 693 695 704 712 714 722
723 756 777 791 794 795 809 827 827 829 837 840 844 861 874 879 905 921 936 936 944 956 958 967 968 976 981
984 991 1005 1012 1023 1024 1050 1057 1058 1080 1081 1085 1086 1093 1100
1115 1119 1132 1148 1220 1236 1238 1244 1245 1247 1256 1268 1271 1276
1283 1295 1306 1312 1327 1329 1332 1335 1338 1355 1385 1402 1405 1406
1443 1444 1454 1457 1485 1488 1509 1511 1519 1519 1519 1520 1527 1528
1532 1555 1573 1608 1612 1628 1632 1639 1642 1658 1684 1685 1687 1690
1698 1706 1711 1744 1746 1757 1771 1773 1785 1789 1791 1794 1795 1797
1799 1824 1828 1850 1852 1860 1882 1884 1910 1912 1913 1943 1943 1953
1954 1979 1979 2009 2010 2011 2012 2024 2026 2048 2051 2052 2059 2066
2066 2070 2083 2088 2088 2099 2116 2128 2130 2132 2138 2145 2155 2158
2158 2170 2177 2180 2180 2186 2197 2198 2200 2210 2223 2231 2237 2240
2270 2281 2292 2325 2331 2334 2341 2342 2372 2376 2382 2398 2404 2412
2414 2422 2423 2434 2446 2469 2480 2518 2527 2548 2553 2562 2586 2594
2601 2602 2618 2625 2628 2648 2653 2665 2670 2671 2672 2687 2705 2707
2715 2758 2766 2778 2783 2810 2822 2825 2826 2838 2843 2847 2851 2851
2855 2869 2883 2891 2904 2905 2906 2909 2910 2936 2944 2948 2966 2978
2990 2992 2995 2998 3003 3008 3019 3023 3034 3045 3045 3056 3062 3063
3067 3085 3096 3111 3117 3117 3125 3129 3144 3169 3170 3183 3198 3199
3210 3224 3236 3241 3251 3264 3270 3272 3288 3291 3304 3308 3323 3345
3370 3373 3377 3381 3382 3383 3390 3405 3413 3425 3442 3449 3451 3467
3470 3472 3476 3489 3491 3500 3501 3512 3515 3526 3534 3550 3553 3562
3572 3577 3602 3616 3619 3630 3643 3664 3666 3677 3678 3680 3683 3691
3691 3693 3695 3725 3746 3750 3761 3766 3778 3788 3796 3812 3836 3843
3880 3895 3895 3911 3914 3925 3953 3959 3959 3968 3973 3973 3986 3994
3996 3999 4001 4008 4022 4025 4046 4047 4061 4065 4070 4074 4076 4077
4081 4090 4092 4108 4129 4132 4137 4153 4198 4201 4208 4213 4217 4225
4235 4241 4251 4253 4268 4277 4282 4283 4291 4296 4311 4313 4330 4332
4334 4341 4378 4382 4395 4413 4413 4422 4443 4447 4453 4456 4458 4472
4474 4479 4481 4510 4517 4527 4531 4578 4620 4627 4640 4643 4653 4655
4676 4681 4686 4710 4715 4730 4741 4766 4768 4785 4800 4806 4818 4822
4825 4825 4837 4852 4868 4883 4917 4920 4921 4927 4936 4941 4945 4966
4968 4970 4977 4978 4978 4992 4995 5054 5058 5062 5072 5087 5104 5106
5109 5115 5249 5256 5257 5257 5259 5261 5262 5267 5268 5271 5272 5272
5272 5288 5294 5297 5300 5306 5316 5324 5324 5325 5337 5351 5370 5372
5387 5400 5422 5428 5430 5435 5446 5452 5454 5469 5488 5488 5496 5506
5509 5510 5515 5520 5536 5559 5570 5590 5594 5606 5610 5614 5633 5633
5634 5635 5635 5641 5645 5653 5677 5690 5694 5702 5708 5727 5754 5763
5774 5778 5782 5785 5788 5796 5802 5811 5820 5823 5850 5869 5883 5895
5898 5904 5908 5909 5910 5914 5921 5927 5942 5963 5987 5994 6003 6011
6013 6013 6019 6020 6035 6037 6048 6054 6055 6058 6089 6090 6137 6140
6154 6155 6159 6164 6177 6183 6228 6273 6273 6273 6273 6274 6277 6285
6294 6296 6307 6316 6324 6332 6341 6364 6367 6377 6395 6396 6398 6426
6438 6441 6445 6448 6452 6466 6475 6488 6495 6504 6506 6507 6516 6517
6536 6540 6545 6575 6612 6615 6626 6627 6640 6649 6662 6671 6688 6692
6693 6693 6694 6726 6752 6765 6777 6785 6790 6794 6802 6809 6812 6818
6825 6846 6853 6853 6855 6860 6888 6890 6901 6908 6915 6920 6943 6943
6955 6991 6998 7001 7022 7028 7031 7038 7055 7059 7070 7076 7077 7078
7103 7121 7127 7131 7133 7140 7150 7154 7167 7172 7194 7198 7206 7221
7224 7254 7254 7279 7279 7286 7302 7303 7311 7312 7323 7330 7333 7349
7355 7358 7364 7371 7381 7381 7382 7383 7398 7402 7406 7423 7447 7465
7491 7499 7507 7514 7515 7526 7529 7530 7539 7545 7555 7555 7557 7591
7612 7615 7616 7616 7636 7638 7638 7653 7669 7684 7685 7692 7732 7744
7747 7765 7770 7789 7791 7796 7801 7804 7805 7807 7810 7832 7837 7840
7841 7859 7892 7914 7926 7930 7956 7961 7963 7970 7971 7974 7976 7976
7983 7986 7995 8011 8016 8019 8024 8036 8074 8076 8078 8085 8086 8094
8129 8130 8160 8191 8191 8193 8198 8207 8224 8234 8240 8256 8258 8259
8261 8308 8323 8326 8330 8331 8343 8348 8364 8367 8368 8372 8372 8399
8415 8415 8415 8432 8438 8445 8449 8463 8487 8514 8514 8520 8522 8524
8529 8539 8545 8548 8552 8553 8560 8575 8595 8596 8599 8601 8618 8620
8636 8640 8649 8651 8655 8672 8681 8682 8684 8706 8732 8756 8767 8773
8773 8776 8788 8804 8807 8809 8810 8837 8841 8872 8885 8892 8895 8900
8909 8919 8935 8938 8939 8955 8960 8960 8960 8969 8976 8982 8988 8994
9015 9060 9062 9063 9066 9073 9080 9097 9122 9133 9137 9137 9159 9165
9178 9192 9194 9194 9197 9207 9213 9231 9236 9237 9238 9243 9246 9258
9259 9267 9293 9296 9298 9351 9351 9378 9380 9388 9396 9405 9410 9422
9456 9475 9484 9504 9510 9524 9539 9557 9569 9574 9577 9602 9602 9604
9606 9620 9620 9638 9649 9650 9670 9675 9680 9695 9706 9712 9713 9716
9734 9760 9768 9775 9778 9785 9788 9788 9811 9816 9817 9827 9832 9846
9847 9865 9884 9886 9892 9900 9905 9909 9922 9923 9934 9948 9954 9958
9969 9970 9973 9978 9980 9985 9986 9994 9997
*/
| 655d45743b6c5e1067582720fe9e708391c305e5.cu | #include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#define DIVIDER 10000
__global__ void even(int *darr,int n)
{
int k=blockIdx.x*512+threadIdx.x;
int t;
k=k*2; //for even positions
if(k< n-1)
{
if(darr[k]>darr[k+1])
{ //swap the numbers
t=darr[k];
darr[k]=darr[k+1];
darr[k+1] =t;
}
}
}
__global__ void odd(int *darr,int n)
{
int k=blockIdx.x*512+threadIdx.x;
int t;
k=k*2 +1; //for odd positions
if(k< n-1)
{
if(darr[k]>darr[k+1])
{ //swap the numbers
t=darr[k];
darr[k]=darr[k+1];
darr[k+1] =t;
}
}
}
int main()
{
int *arr,*darr;
int n,i;
time_t t;
srand((unsigned)time(&t));
printf("\n Enter how many numbers :");
scanf("%d",&n);
arr=(int *)malloc(n*sizeof(int)); //for dynamic inputs
for(i=0; i<n; i++)
{
arr[i] = (rand() % DIVIDER) + 1;
}
printf("\n UNSORTED ARRAY \n");
for(i=0; i<n; i++)
printf("\t%d",arr[i]);
cudaMalloc(&darr,n*sizeof(int)); //memory allocation in GPU for darr
cudaMemcpy(darr,arr ,n*sizeof(int) ,cudaMemcpyHostToDevice); // data transfer from host to GPU
for(i=0;i<=n/2;i++)
{
even<<<n/1024+1,512>>>(darr,n);
odd<<<n/1024+1,512>>>(darr,n);
}
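// Each iteration runs one even phase and one odd phase, so n/2+1 iterations
// give at least n compare-exchange phases -- the worst case odd-even
// transposition sort needs to fully sort n elements.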
cudaMemcpy(arr,darr,n*sizeof(int),cudaMemcpyDeviceToHost);
printf("\n SORTED ARRAY \n");
for(i=0; i<n; i++)
printf("\t%d",arr[i]);
}
/*
*****************************output******************************************************
Enter how many numbers :1070
UNSORTED ARRAY
4332 4334 2422 4945 7974 1295 8969 555 6155 8599 5802 2480 7514
491 3373 8094 7801 2145 2270 9097 2843 7732 9978 4217 9980 8368
8960 4001 8449 2586 2838 9133 3272 1612 4077 1245 2906 3045
1799 9060 7995 3953 7892 5509 4443 7616 3602 8596 9760 2223
7692 2602 9954 7669 6818 6285 6037 5778 285 837 4715 9475 9969
7986 1086 4046 9231 343 3442 7381 5754 7789 7685 9997 9649 2128
7612 9602 723 7371 1824 4766 9973 8129 2434 6790 4413 4822 8919
1050 2011 9986 524 1979 7971 7961 2376 3553 4655 2170 7286 408
9958 4970 405 9606 3449 8016 9207 523 5387 7382 5288 1711 5510
4074 4852 6274 8895 123 7323 905 108 4198 9236 8078 2158 7963 7983
6812 132 5268 3572 6441 237 328 2398 38 8343 7956 560 81 5337 2200 1791
7198 6273 2995 3472 1519 3117 794 8776 3224 4992 8011 7653 3501
2325 5635 313 8809 7254 236 5249 7491 563 3999 3880 5257 1954 791 1690
3643 2990 9832 840 5614 2826 4311 7133 5942 1457 5908 9165 6448
270 3170 9948 2594 8804 6612 1402 6058 3199 3003 9900 3761 7001
131 5370 5306 921 7059 5300 3911 6890 6140 9524 9716 6802 3008 2009
8258 5267 7526 1057 5536 695 1005 8130 5850 3968 5883 8259 7167
8885 4510 7279 2237 4640 2648 3895 1913 9706 9194 5823 2948 1685
1698 9015 8487 1058 1023 3096 6324 8548 4153 1860 5594 1509 6341
1444 1828 8575 6054 8994 3812 564 6273 6048 1555 5272 6294 3467
4978 5488 5641 4277 7172 3691 3291 2010 1100 4313 5106 7423 9213
5610 5634 4806 3470 8326 2601 5297 6901 8655 4291 712 5570 6915
3111 3476 2186 9405 6943 3515 1244 8935 7791 8415 2625 1081 6777
76 1746 8234 7499 958 3843 9484 5763 7312 7810 8364 8960 4710 3370
9602 1773 8939 6516 4883 2414 5054 639 5708 4920 1882 4643 9063
6649 3619 6495 3425 3695 8240 1658 7545 9197 1852 7028 1312 5515
4837 9675 4474 5898 9396 4076 4022 4686 6943 5257 3451 8348 5895
9159 9620 4129 153 8682 777 123 1528 4201 169 9768 2210 4065 5316 4061
1093 6627 5927 2281 2653 400 4531 2048 827 8552 3085 7770 160 6536 2469
6055 5694 2088 183 2198 7121 7311 8672 8649 1511 8841 4768 72 2905
83 4132 349 3062 58 8982 2066 6809 3512 466 7636 8415 3550 1757 4927
85 4225 981 2130 2665 7515 679 9785 4825 9351 4785 2687 8191 5904 2758
7447 5987 3241 7796 5400 9650 3129 7465 2810 2992 4282 445 1406 7832
8553 6332 4268 2778 3664 6398 1794 7530 7076 7930 8706 6426 2715
7744 968 8618 6853 8415 956 6445 2562 2707 2446 5690 172 5256 8681 4453
2052 6438 8636 605 9122 2904 9734 9137 5653 1527 3019 9080 9456 8076
5506 8522 5820 2825 7140 2672 7591 4447 5469 6504 7154 7914 8545
3677 3169 3577 4481 1573 6367 3117 2177 5488 6020 1910 976 8024 9788
3994 7103 9243 2070 8960 7765 4241 1785 1256 3264 5727 5702 8732
2231 2855 2998 7127 2883 2518 704 7364 4090 7070 480 2618 8909 2851
4527 9884 874 666 230 7976 9909 8651 3288 4025 2891 5072 1632 6154 7150
7333 1238 9380 6540 4235 6507 9422 6752 3562 6785 7194 631 3616 9811
9539 6466 690 5774 3691 1355 6003 8019 7615 4653 1306 1639 3895 6377
3270 49 3526 6955 7638 9258 9846 8224 2116 9267 1327 5677 2404 8520
6307 6019 8331 2197 8837 5372 7970 2527 6726 3973 545 4341 4977 1850
2331 8872 4578 1953 5272 4456 5259 2909 65 5104 1132 2180 722 8810
4208 3125 7330 514 5496 2012 9062 684 7383 7031 3210 4108 7355 107 4800
8684 8308 7131 7555 9238 5435 9178 3693 693 8438 109 5796 5921 8640
6517 4730 2847 5994 2059 9712 7841 4070 8773 8524 7804 2155 8085
1912 9510 8191 3063 8193 2851 6545 2099 2088 1979 1276 2132 2671
9713 2240 4818 5633 879 1335 362 3725 3680 8773 9788 1520 9194 8560
6395 6998 714 4479 5261 6575 2670 8323 1119 5520 4868 9569 3959 3198
844 6090 5869 6908 4681 686 8892 5559 8372 5606 5635 2051 4378 5422
9922 9923 333 6316 6920 7398 795 2180 3973 9816 6855 1443 1687 8074
1012 5645 1271 8207 8086 3491 5115 2766 529 358 4676 8900 5963 6662
7303 6692 2083 7224 6615 8767 9892 3534 6164 7038 2066 6488 6853
5272 4283 8539 3345 5294 535 967 3500 8620 4458 4966 1385 1338 5324
2412 237 7638 9073 7539 4330 7507 1115 944 6273 7358 829 8788 4395 9246
1628 1247 4517 5910 6137 4213 7555 6671 1532 7406 1642 2341 2372
9378 3678 4047 1789 3914 1684 861 7805 6013 8367 5271 3308 991 2628
4137 9778 7022 3382 1405 4620 4251 3666 756 8463 1220 3778 9994 4978
1771 2334 7349 1148 6011 7747 2936 6277 5782 3796 4081 1795 8514
9351 1454 9504 8330 5590 5633 5351 5324 3390 9970 9574 7055 7077
8036 4627 7206 4382 9604 8976 3067 3304 6475 5430 7402 9410 1706
3183 9557 2138 1329 4422 1488 2783 3925 9817 8372 5909 1519 47 9298
7840 9620 2705 1268 4008 3683 4825 4741 9638 152 7807 9293 6626 3236
6694 6035 4941 6228 1943 7078 7557 6364 4917 339 6640 1085 5062 2548
8955 5109 8198 6794 1080 7254 4413 5087 936 9237 9827 573 9388 3986
9865 6013 7221 2910 8399 8514 9137 341 1943 6693 3056 6860 3383 9695
4296 8445 8595 3251 9905 3144 6396 984 397 809 2423 1332 45 8601 8256
5785 8938 4472 1797 6159 7381 196 1024 2869 6888 2966 5914 6296 6177
9296 2342 6825 4092 936 75 348 431 2822 7684 827 3630 106 2158 27 5058 6765
5811 3996 1236 3959 6506 4968 506 7529 7837 3746 6846 3750 41 3023
3045 2382 9847 3489 9670 6273 3836 6452 5446 1519 7279 5428 7976
5788 5454 3034 2553 7616 3381 3788 7926 9886 8756 8432 3766 2944
8529 611 6693 4921 9985 6089 7302 6183 9577 3323 8807 3413 9775 4253
1283 3405 9680 9259 9192 1485 2292 1744 5452 2024 1884 3377 8261
6991 8160 2026 9934 6688 8988 2978 1608 5325 9066 5262 7859 4995
4936
SORTED ARRAY
27 38 41 45 47 49 58 65 72 75 76 81 83 85 106 107 108 109 123 123 131 132 152 153 160 169 172
183 196 230 236 237 237 270 285 313 328 333 339 341 343 348 349 358 362 397 400 405 408 431 445 466 480 491
506 514 523 524 529 535 545 555 560 563 564 573 605 611 631 639 666 679 684 686 690 693 695 704 712 714 722
723 756 777 791 794 795 809 827 827 829 837 840 844 861 874 879 905 921 936 936 944 956 958 967 968 976 981
984 991 1005 1012 1023 1024 1050 1057 1058 1080 1081 1085 1086 1093 1100
1115 1119 1132 1148 1220 1236 1238 1244 1245 1247 1256 1268 1271 1276
1283 1295 1306 1312 1327 1329 1332 1335 1338 1355 1385 1402 1405 1406
1443 1444 1454 1457 1485 1488 1509 1511 1519 1519 1519 1520 1527 1528
1532 1555 1573 1608 1612 1628 1632 1639 1642 1658 1684 1685 1687 1690
1698 1706 1711 1744 1746 1757 1771 1773 1785 1789 1791 1794 1795 1797
1799 1824 1828 1850 1852 1860 1882 1884 1910 1912 1913 1943 1943 1953
1954 1979 1979 2009 2010 2011 2012 2024 2026 2048 2051 2052 2059 2066
2066 2070 2083 2088 2088 2099 2116 2128 2130 2132 2138 2145 2155 2158
2158 2170 2177 2180 2180 2186 2197 2198 2200 2210 2223 2231 2237 2240
2270 2281 2292 2325 2331 2334 2341 2342 2372 2376 2382 2398 2404 2412
2414 2422 2423 2434 2446 2469 2480 2518 2527 2548 2553 2562 2586 2594
2601 2602 2618 2625 2628 2648 2653 2665 2670 2671 2672 2687 2705 2707
2715 2758 2766 2778 2783 2810 2822 2825 2826 2838 2843 2847 2851 2851
2855 2869 2883 2891 2904 2905 2906 2909 2910 2936 2944 2948 2966 2978
2990 2992 2995 2998 3003 3008 3019 3023 3034 3045 3045 3056 3062 3063
3067 3085 3096 3111 3117 3117 3125 3129 3144 3169 3170 3183 3198 3199
3210 3224 3236 3241 3251 3264 3270 3272 3288 3291 3304 3308 3323 3345
3370 3373 3377 3381 3382 3383 3390 3405 3413 3425 3442 3449 3451 3467
3470 3472 3476 3489 3491 3500 3501 3512 3515 3526 3534 3550 3553 3562
3572 3577 3602 3616 3619 3630 3643 3664 3666 3677 3678 3680 3683 3691
3691 3693 3695 3725 3746 3750 3761 3766 3778 3788 3796 3812 3836 3843
3880 3895 3895 3911 3914 3925 3953 3959 3959 3968 3973 3973 3986 3994
3996 3999 4001 4008 4022 4025 4046 4047 4061 4065 4070 4074 4076 4077
4081 4090 4092 4108 4129 4132 4137 4153 4198 4201 4208 4213 4217 4225
4235 4241 4251 4253 4268 4277 4282 4283 4291 4296 4311 4313 4330 4332
4334 4341 4378 4382 4395 4413 4413 4422 4443 4447 4453 4456 4458 4472
4474 4479 4481 4510 4517 4527 4531 4578 4620 4627 4640 4643 4653 4655
4676 4681 4686 4710 4715 4730 4741 4766 4768 4785 4800 4806 4818 4822
4825 4825 4837 4852 4868 4883 4917 4920 4921 4927 4936 4941 4945 4966
4968 4970 4977 4978 4978 4992 4995 5054 5058 5062 5072 5087 5104 5106
5109 5115 5249 5256 5257 5257 5259 5261 5262 5267 5268 5271 5272 5272
5272 5288 5294 5297 5300 5306 5316 5324 5324 5325 5337 5351 5370 5372
5387 5400 5422 5428 5430 5435 5446 5452 5454 5469 5488 5488 5496 5506
5509 5510 5515 5520 5536 5559 5570 5590 5594 5606 5610 5614 5633 5633
5634 5635 5635 5641 5645 5653 5677 5690 5694 5702 5708 5727 5754 5763
5774 5778 5782 5785 5788 5796 5802 5811 5820 5823 5850 5869 5883 5895
5898 5904 5908 5909 5910 5914 5921 5927 5942 5963 5987 5994 6003 6011
6013 6013 6019 6020 6035 6037 6048 6054 6055 6058 6089 6090 6137 6140
6154 6155 6159 6164 6177 6183 6228 6273 6273 6273 6273 6274 6277 6285
6294 6296 6307 6316 6324 6332 6341 6364 6367 6377 6395 6396 6398 6426
6438 6441 6445 6448 6452 6466 6475 6488 6495 6504 6506 6507 6516 6517
6536 6540 6545 6575 6612 6615 6626 6627 6640 6649 6662 6671 6688 6692
6693 6693 6694 6726 6752 6765 6777 6785 6790 6794 6802 6809 6812 6818
6825 6846 6853 6853 6855 6860 6888 6890 6901 6908 6915 6920 6943 6943
6955 6991 6998 7001 7022 7028 7031 7038 7055 7059 7070 7076 7077 7078
7103 7121 7127 7131 7133 7140 7150 7154 7167 7172 7194 7198 7206 7221
7224 7254 7254 7279 7279 7286 7302 7303 7311 7312 7323 7330 7333 7349
7355 7358 7364 7371 7381 7381 7382 7383 7398 7402 7406 7423 7447 7465
7491 7499 7507 7514 7515 7526 7529 7530 7539 7545 7555 7555 7557 7591
7612 7615 7616 7616 7636 7638 7638 7653 7669 7684 7685 7692 7732 7744
7747 7765 7770 7789 7791 7796 7801 7804 7805 7807 7810 7832 7837 7840
7841 7859 7892 7914 7926 7930 7956 7961 7963 7970 7971 7974 7976 7976
7983 7986 7995 8011 8016 8019 8024 8036 8074 8076 8078 8085 8086 8094
8129 8130 8160 8191 8191 8193 8198 8207 8224 8234 8240 8256 8258 8259
8261 8308 8323 8326 8330 8331 8343 8348 8364 8367 8368 8372 8372 8399
8415 8415 8415 8432 8438 8445 8449 8463 8487 8514 8514 8520 8522 8524
8529 8539 8545 8548 8552 8553 8560 8575 8595 8596 8599 8601 8618 8620
8636 8640 8649 8651 8655 8672 8681 8682 8684 8706 8732 8756 8767 8773
8773 8776 8788 8804 8807 8809 8810 8837 8841 8872 8885 8892 8895 8900
8909 8919 8935 8938 8939 8955 8960 8960 8960 8969 8976 8982 8988 8994
9015 9060 9062 9063 9066 9073 9080 9097 9122 9133 9137 9137 9159 9165
9178 9192 9194 9194 9197 9207 9213 9231 9236 9237 9238 9243 9246 9258
9259 9267 9293 9296 9298 9351 9351 9378 9380 9388 9396 9405 9410 9422
9456 9475 9484 9504 9510 9524 9539 9557 9569 9574 9577 9602 9602 9604
9606 9620 9620 9638 9649 9650 9670 9675 9680 9695 9706 9712 9713 9716
9734 9760 9768 9775 9778 9785 9788 9788 9811 9816 9817 9827 9832 9846
9847 9865 9884 9886 9892 9900 9905 9909 9922 9923 9934 9948 9954 9958
9969 9970 9973 9978 9980 9985 9986 9994 9997
*/
|
efb5d78e839e0012e50c65890b92c5d38946570b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
//#include<cuda.h>
#include<unistd.h>
#include<time.h>
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
*/
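/*
 * Layout sketch for the flattened SELL-C storage built further below
 * (hypothetical 4-row example with c = 2): if the sorted rows 0..3 hold
 * 3, 2, 2 and 1 non-zeros, then cols = {3, 2}, cs = {0, 6, 10}, and
 * scval_flat packs chunk 0 column-major as
 *   r0[0] r1[0] r0[1] r1[1] r0[2] 0(pad)
 * so the c threads of a chunk read consecutive addresses at every step j.
 */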
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *cs)
{
int tid=blockIdx.x;
int sum1=0;
int j;
//int colidx=tid/2;
//printf("\n tid= %d", tid);
//printf("\n Writing to %d",tid*c+threadIdx.x);
for(j=0;j<cols[tid];j++)
{
sum1 += scval[cs[tid]+(j*blockDim.x)+threadIdx.x]*vec[sccol[cs[tid]+(j*blockDim.x)+threadIdx.x]];
// sum2 += scval[cs[tid]+(j*2)+1]*vec[sccol[cs[tid]+(j*2)+1]];
}
__syncthreads();
result[tid*blockDim.x+threadIdx.x]=sum1;
// result[tid*c+1]=sum2;
}
__global__ void printmatscreen(int* mat, int N)
{
int i;
for (i=0;i<N;i++)
{
printf("%d ",mat[i]);
}
printf("\n");
}
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
int** theArray;
theArray = (int**) malloc(arraySizeX*sizeof(int*));
int i;
for (i = 0; i < arraySizeX; i++)
theArray[i] = (int*) malloc(arraySizeY*sizeof(int));
int j;
for (i=0;i<arraySizeX;i++)
{
for (j=0;j<arraySizeY;j++)
{
theArray[i][j]=0;
}
}
return theArray;
}
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
int** theArray;
theArray = (int**) malloc(rows*sizeof(int*));
int i, j, k;
for (i = 0; i < blocks; i++)
{
k=columns[i];
for (j=0; j < blocksize; j++)
{
theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
}
}
//int j;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
theArray[i*blocksize+j][k]=0;
}
}
}
return theArray;
}
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
int** NewArray=Make2DVariableIntArray(rows,blocks,blocksize,columns);
int i, j, k;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
NewArray[i*blocksize+j][k]=theArray[i*blocksize+j][k];
}
}
}
printf("changed to multiple matrices");
return NewArray;
}
void init_zeros(int** matrix, int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
matrix[i][j]=0;
}
}
}
void printmat(int** matrix, int N, int Nj)
{
int i,j;
for (i=0;i<N;i++)
{
printf("\n");
for (j=0;j<N;j++)
{
printf("%d ",matrix[i][j]);
}
}
printf("\n");
}
void printtofile(int** matrix, int K, char* filename)
{
/*
Prints original 2D matrices to file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i,j;
for (i=0;i<K;i++)
{
fprintf(fp, "\n");
for (j=0;j<K;j++)
{
fprintf(fp, "%d\t", matrix[i][j]);
}
}
}
void printtofile1D(int* matrix, int K, char* filename)
{
/*
Prints resultant matrix to a file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i;
for (i=0;i<K;i++)
{
fprintf(fp, "%d\n", matrix[i]);
}
}
int* Make1DIntArray(int arraySizeX)
{
int* theArray;
theArray = (int*)malloc(arraySizeX*sizeof(int));
int i;
for (i=0;i<arraySizeX;i++)
{
theArray[i]=0;
}
return theArray;
}
void freese(int sizeX, int sizeY, double** ptr)
{
int i;
for (i=0;i<sizeX;i++)
free(ptr[i]);
free(ptr);
}
int main()
{
int N=5000;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int* rows=Make1DIntArray(N);
int* resultsordered=Make1DIntArray(N);
int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_cs;
//int val[10],col[10],row[10];
arr=fopen("matrix5000.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vector5000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
//Reading the matrix
for(i=0;i<N;i++)
{
//printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
// printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
// gettimeofday(&end, NULL);
// double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
sccol[i][k]=j;
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{
maxrowwidth=rowwidth[i];
}
k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
if(sig>1&&c!=sig)
{
for(i=0;i<N;i=i+sig)
{
for(k=0;k<sig-1;k++)
{
for(j=i;(j<i+sig-1) && (j<N);j++)
{
if(rowwidth[j]<rowwidth[j+1])
{
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
}
}
/* for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
N=i;
*/
printf("\nmaxrowwidth=%d\n",maxrowwidth);
// printmat(scval,N,N);
// printtofile(scval,N,"scval.txt");
// printtofile(sccol,N,"sccol.txt");
/* printf("\n Shuffled rows is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",rows[i]);
}
*/
//printmatscreen<<<1,1>>>(dev_b,N);
/* multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
hipMemcpy(result, dev_c, sizeof(int)*N, hipMemcpyDeviceToHost);
for (i=0;i<N;i++)
{
printf("\n%d",result[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
*/
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
/* for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
//printf("%d ",varsccol[i*c+j][k]);
}
}
}
*/
int varsize=colsum*c;
//flattening scval and sccol
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* cs=Make1DIntArray((N/c)+1);
cs[0]=0;
int countcols=0;
int z=0;
printf("\n");
printf("\n");
printf("\n");
for (i=0;i<N/c;i++)
{
countcols=0;
for(j=0;j<cols[i];j++)
{
for (k=0;k<c;k++)
{
scval_flat[counters]=varscval[i*c+k][j];
sccol_flat[counters]=varsccol[i*c+k][j];
//printf("%d ",scval_flat[counters]);
//printf("%d\n", sccol_flat[counters]);
counters=counters+1;
countcols=countcols+1;
}
}
cs[z+1]=cs[z]+countcols;
z=z+1;
}
/* printf("\ncs:");
for(i=1;i<(N/c)+1;i++)
printf("%d ", cs[i]);
*/
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
printtofile1D(result,N,"resultstest.txt");
*/
hipEvent_t start, stop, start_kernel, stop_kernel;
float time_kernel;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&start_kernel);
hipEventCreate(&stop_kernel);
hipMalloc((void**)&dev_vec, sizeof(int)*N);
hipMalloc((void**)&dev_scval, sizeof(int)*varsize);
hipMalloc((void**)&dev_result, sizeof(int)*N);
hipMalloc((void**)&dev_sccol, sizeof(int)*varsize);
hipMalloc((void**)&dev_cols, sizeof(int)*(N/c));
hipMalloc((void**)&dev_cs, sizeof(int)*(N/c));
//hipEventRecord(start,0);
hipMemcpy(dev_vec, vecX, sizeof(int)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, hipMemcpyHostToDevice);
hipMemcpy(dev_result, result, sizeof(int)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, hipMemcpyHostToDevice);
hipMemcpy(dev_cols, cols, sizeof(int)*(N/c), hipMemcpyHostToDevice);
hipMemcpy(dev_cs, cs, sizeof(int)*(N/c), hipMemcpyHostToDevice);
hipEventRecord(start_kernel,0);
hipLaunchKernelGGL(( multiply), dim3(N/c),dim3(c), 0, 0, dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_cs);
//sleep(10);
hipEventRecord(stop_kernel,0);
hipMemcpy(result, dev_result, sizeof(int)*N, hipMemcpyDeviceToHost);
//hipEventRecord(stop,0);
hipEventSynchronize(stop_kernel);
//hipEventElapsedTime(&time, start, stop);
hipEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
//printf("\nTime for kernel with data transfer = %f ms \n", time);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
for (i=0;i<N;i++)
{
resultsordered[rows[i]]=result[i];
}
printtofile1D(resultsordered,N,"results.txt");
// CODE TO RESHUFFLE BACK
hipFree(dev_vec);
hipFree(dev_scval);
hipFree(dev_result);
hipFree(dev_sccol);
hipFree(dev_cols);
return 0;
/*
hipMalloc((void**)&dev_vec, sizeof(int)*N);
hipMalloc((void**)&dev_scval_flat, sizeof(int)*varsize);
hipMalloc((void**)&dev_result, sizeof(int)*N);
hipMalloc((void**)&dev_sccol_flat, sizeof(int)*varsize);
hipMemcpy(dev_a, vecX, sizeof(int)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_b, varscval, sizeof(int)*varsize, hipMemcpyHostToDevice);
hipMemcpy(dev_c, result, sizeof(int)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_d, varsccol, sizeof(int)*varsize, hipMemcpyHostToDevice);
*/
}
| efb5d78e839e0012e50c65890b92c5d38946570b.cu | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
//#include<cuda.h>
#include<unistd.h>
#include<time.h>
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
*/
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *cs)
{
int tid=blockIdx.x;
int sum1=0;
int j;
//int colidx=tid/2;
//printf("\n tid= %d", tid);
//printf("\n Writing to %d",tid*c+threadIdx.x);
for(j=0;j<cols[tid];j++)
{
sum1 += scval[cs[tid]+(j*blockDim.x)+threadIdx.x]*vec[sccol[cs[tid]+(j*blockDim.x)+threadIdx.x]];
// sum2 += scval[cs[tid]+(j*2)+1]*vec[sccol[cs[tid]+(j*2)+1]];
}
__syncthreads();
result[tid*blockDim.x+threadIdx.x]=sum1;
// result[tid*c+1]=sum2;
}
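/*
 * The cs[tid] + j*blockDim.x + threadIdx.x indexing makes the blockDim.x (= c)
 * threads of one chunk touch consecutive elements of scval and sccol at every
 * step j, which is exactly what the column-major chunk packing on the host
 * side is arranged for (coalesced loads).
 */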
__global__ void printmatscreen(int* mat, int N)
{
int i;
for (i=0;i<N;i++)
{
printf("%d ",mat[i]);
}
printf("\n");
}
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
int** theArray;
theArray = (int**) malloc(arraySizeX*sizeof(int*));
int i;
for (i = 0; i < arraySizeX; i++)
theArray[i] = (int*) malloc(arraySizeY*sizeof(int));
int j;
for (i=0;i<arraySizeX;i++)
{
for (j=0;j<arraySizeY;j++)
{
theArray[i][j]=0;
}
}
return theArray;
}
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
int** theArray;
theArray = (int**) malloc(rows*sizeof(int*));
int i, j, k;
for (i = 0; i < blocks; i++)
{
k=columns[i];
for (j=0; j < blocksize; j++)
{
theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
}
}
//int j;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
theArray[i*blocksize+j][k]=0;
}
}
}
return theArray;
}
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
int** NewArray=Make2DVariableIntArray(rows,blocks,blocksize,columns);
int i, j, k;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
NewArray[i*blocksize+j][k]=theArray[i*blocksize+j][k];
}
}
}
printf("changed to multiple matrices");
return NewArray;
}
void init_zeros(int** matrix, int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
matrix[i][j]=0;
}
}
}
void printmat(int** matrix, int N, int Nj)
{
int i,j;
for (i=0;i<N;i++)
{
printf("\n");
for (j=0;j<N;j++)
{
printf("%d ",matrix[i][j]);
}
}
printf("\n");
}
void printtofile(int** matrix, int K, char* filename)
{
/*
Prints original 2D matrices to file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i,j;
for (i=0;i<K;i++)
{
fprintf(fp, "\n");
for (j=0;j<K;j++)
{
fprintf(fp, "%d\t", matrix[i][j]);
}
}
}
void printtofile1D(int* matrix, int K, char* filename)
{
/*
Prints resultant matrix to a file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i;
for (i=0;i<K;i++)
{
fprintf(fp, "%d\n", matrix[i]);
}
}
int* Make1DIntArray(int arraySizeX)
{
int* theArray;
theArray = (int*)malloc(arraySizeX*sizeof(int));
int i;
for (i=0;i<arraySizeX;i++)
{
theArray[i]=0;
}
return theArray;
}
void freese(int sizeX, int sizeY, double** ptr)
{
int i;
for (i=0;i<sizeX;i++)
free(ptr[i]);
free(ptr);
}
int main()
{
int N=5000;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int* rows=Make1DIntArray(N);
int* resultsordered=Make1DIntArray(N);
int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_cs;
//int val[10],col[10],row[10];
arr=fopen("matrix5000.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vector5000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
//Reading the matrix
for(i=0;i<N;i++)
{
//printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
// printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
// gettimeofday(&end, NULL);
// double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
sccol[i][k]=j;
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{
maxrowwidth=rowwidth[i];
}
k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
if(sig>1&&c!=sig)
{
for(i=0;i<N;i=i+sig)
{
for(k=0;k<sig-1;k++)
{
for(j=i;(j<i+sig-1) && (j<N);j++)
{
if(rowwidth[j]<rowwidth[j+1])
{
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
}
}
/* for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
N=i;
*/
printf("\nmaxrowwidth=%d\n",maxrowwidth);
// printmat(scval,N,N);
// printtofile(scval,N,"scval.txt");
// printtofile(sccol,N,"sccol.txt");
/* printf("\n Shuffled rows is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",rows[i]);
}
*/
//printmatscreen<<<1,1>>>(dev_b,N);
/* multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
cudaMemcpy(result, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
for (i=0;i<N;i++)
{
printf("\n%d",result[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
*/
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
/* for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
//printf("%d ",varsccol[i*c+j][k]);
}
}
}
*/
int varsize=colsum*c;
//flattening scval and sccol
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* cs=Make1DIntArray((N/c)+1);
cs[0]=0;
int countcols=0;
int z=0;
printf("\n");
printf("\n");
printf("\n");
for (i=0;i<N/c;i++)
{
countcols=0;
for(j=0;j<cols[i];j++)
{
for (k=0;k<c;k++)
{
scval_flat[counters]=varscval[i*c+k][j];
sccol_flat[counters]=varsccol[i*c+k][j];
//printf("%d ",scval_flat[counters]);
//printf("%d\n", sccol_flat[counters]);
counters=counters+1;
countcols=countcols+1;
}
}
cs[z+1]=cs[z]+countcols;
z=z+1;
}
/* printf("\ncs:");
for(i=1;i<(N/c)+1;i++)
printf("%d ", cs[i]);
*/
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
printtofile1D(result,N,"resultstest.txt");
*/
cudaEvent_t start, stop, start_kernel, stop_kernel;
float time_kernel;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol, sizeof(int)*varsize);
cudaMalloc((void**)&dev_cols, sizeof(int)*(N/c));
cudaMalloc((void**)&dev_cs, sizeof(int)*(N/c));
//cudaEventRecord(start,0);
cudaMemcpy(dev_vec, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_result, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cols, cols, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cs, cs, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaEventRecord(start_kernel,0);
multiply<<<N/c,c>>>(dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_cs);
//sleep(10);
cudaEventRecord(stop_kernel,0);
cudaMemcpy(result, dev_result, sizeof(int)*N, cudaMemcpyDeviceToHost);
//cudaEventRecord(stop,0);
cudaEventSynchronize(stop_kernel);
//cudaEventElapsedTime(&time, start, stop);
cudaEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
//printf("\nTime for kernel with data transfer = %f ms \n", time);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
for (i=0;i<N;i++)
{
resultsordered[rows[i]]=result[i];
}
printtofile1D(resultsordered,N,"results.txt");
// CODE TO RESHUFFLE BACK
cudaFree(dev_vec);
cudaFree(dev_scval);
cudaFree(dev_result);
cudaFree(dev_sccol);
cudaFree(dev_cols);
return 0;
/*
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval_flat, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol_flat, sizeof(int)*varsize);
cudaMemcpy(dev_a, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, varscval, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_d, varsccol, sizeof(int)*varsize, cudaMemcpyHostToDevice);
*/
}
|
14ad7ae169f53a937cdff367de42c2af3de50f7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cutlass/convolution/device/convolution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/command_line.h"
#include <torch/extension.h>
#include <iostream>
// The code section below describes datatype for input, output tensors and
// computation between elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue =
float; // Data type of epilogue computation (alpha, beta)
using ElementSrc = cutlass::half_t; // Data type of elements in src tensor
using ElementFilter =
cutlass::half_t; // Data type of elements in filter tensor
using ElementDst = cutlass::half_t; // Data type of elements in output tensor
using LayoutSrc = cutlass::layout::TensorNCHW;
using LayoutFilter = cutlass::layout::TensorNCHW;
using LayoutDst = cutlass::layout::TensorNCHW;
// This code section describes whether you want to use tensor cores or regular
// SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;
// This code section describes the tile size a thread block will compute
using ThreadblockShape =
cutlass::gemm::GemmShape<64, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape =
cutlass::gemm::GemmShape<8, 8, 4>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::conv::threadblock::DepthwiseConvolutionFpropThreadblockSwizzle;
// Number of pipelines you want to use
constexpr int NumStages = 2;
// This code section describes the epilogue part of the kernel, we use default
// value
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
ElementDst, // Data type of output matrix.
1, ElementAccumulator, // Data type of accumulator
ElementDst, // Data type of bias
ElementComputeEpilogue>; // Data type for alpha/beta in linear
// combination
using Convolution = cutlass::conv::device::Convolution<
ElementSrc, LayoutSrc, ElementFilter, LayoutFilter, ElementDst,
LayoutDst, ElementDst, LayoutDst, ElementAccumulator,
cutlass::conv::ConvType::kDepthwiseConvolution, MMAOp, SmArch,
ThreadblockShape, WarpShape, InstructionShape, EpilogueOp,
SwizzleThreadBlock, NumStages, 1, 1,
cutlass::conv::SpecialOptimizeDesc::NONE, cutlass::arch::OpMultiplyAdd,
cutlass::conv::ImplicitGemmMode::GEMM_TN>;
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
float alpha;
float beta;
bool benchmark;
std::string tag;
Options()
: help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 1),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(1000),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) {}
// Verify the problem size is compatible with the CUTLASS Convolution
// implementation.
bool valid() {
int const kAlignment = 1;
if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) /
conv_stride.row() +
1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) /
conv_stride.column() +
1,
filter_size.n());
}
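    // With the defaults above (input N=1, H=32, W=32, C=32; 3x3 filter;
    // padding 1; stride 1) this evaluates to (32 + 1 + 1 - 3) / 1 + 1 = 32 in
    // both spatial dimensions, i.e. an output of 1x32x32x32.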
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS / K
int64_t fmas =
output_size().product() *
int64_t(filter_size.h() * filter_size.w() * filter_size.c()) /
output_size().c();
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
#define CUTLASS_CHECK(status) \
{ \
cutlass::Status error = status; \
if (error != cutlass::Status::kSuccess) { \
std::cerr << "Got cutlass error: " \
<< cutlassGetStatusString(error) << " at: " << __LINE__ \
<< std::endl; \
exit(EXIT_FAILURE); \
} \
}
#define CUDA_CHECK(status) \
{ \
hipError_t error = status; \
if (error != hipSuccess) { \
std::cerr << "Got bad cuda status: " << hipGetErrorString(error) \
<< " at line: " << __LINE__ << std::endl; \
exit(EXIT_FAILURE); \
} \
}
#define CHECK_CUDA(x) \
TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
torch::Tensor forward_fp16(torch::Tensor input, torch::Tensor weight) {
CHECK_INPUT(input);
CHECK_INPUT(weight);
auto output = torch::zeros_like(input);
Options options = Options();
options.update({input.size(0), input.size(2), input.size(3), input.size(1)},
{weight.size(0), weight.size(2), weight.size(3), 1});
cutlass::TensorRef<ElementSrc, LayoutSrc> d_src(
(ElementSrc*)input.data_ptr(),
LayoutSrc::packed(options.input_size));
cutlass::TensorRef<ElementFilter, LayoutFilter> d_filter(
(ElementFilter*)weight.data_ptr(),
LayoutFilter::packed(options.filter_size));
cutlass::TensorRef<typename Convolution::ElementDst,
typename Convolution::LayoutDst>
d_dst((ElementDst*)output.data_ptr(),
LayoutDst::packed(options.output_size()));
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
typename Convolution::Arguments arguments{
{options.input_size, options.filter_size, options.padding,
options.conv_stride, options.dilation, options.output_size(), mode,
split_k_slices, options.filter_size.n()},
d_src, // tensor_src.device_ref(),
d_filter, // tensor_filter.device_ref(),
NULL, // tensor_bias.device_ref(),
NULL, // tensor_z.device_ref(),
d_dst, // tensor_dst.device_ref(),
{ElementComputeEpilogue(options.alpha), ElementComputeEpilogue(0),
ElementComputeEpilogue(options.beta)}};
//
// Initialize CUTLASS Convolution
//
Convolution conv_op;
size_t workspace_size = conv_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
CUTLASS_CHECK(conv_op.initialize(arguments, workspace.get()));
//
// Launch initialized CUTLASS kernel
//
CUTLASS_CHECK(conv_op());
return output;
}
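// Binding sketch (hypothetical, not shown in this file): as a torch extension
// the entry point would typically be exposed via pybind11, e.g.
//
//   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//       m.def("forward_fp16", &forward_fp16,
//             "depthwise conv2d forward (fp16, NCHW)");
//   }
//
// and called from Python with contiguous half-precision CUDA tensors.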
| 14ad7ae169f53a937cdff367de42c2af3de50f7d.cu | #include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cutlass/convolution/device/convolution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/command_line.h"
#include <torch/extension.h>
#include <iostream>
// The code section below describes datatype for input, output tensors and
// computation between elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue =
float; // Data type of epilogue computation (alpha, beta)
using ElementSrc = cutlass::half_t; // Data type of elements in src tensor
using ElementFilter =
cutlass::half_t; // Data type of elements in filter tensor
using ElementDst = cutlass::half_t; // Data type of elements in output tensor
using LayoutSrc = cutlass::layout::TensorNCHW;
using LayoutFilter = cutlass::layout::TensorNCHW;
using LayoutDst = cutlass::layout::TensorNCHW;
// This code section describes whether you want to use tensor cores or regular
// SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;
// This code section describes the tile size a thread block will compute
using ThreadblockShape =
cutlass::gemm::GemmShape<64, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape =
cutlass::gemm::GemmShape<8, 8, 4>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::conv::threadblock::DepthwiseConvolutionFpropThreadblockSwizzle;
// Number of pipelines you want to use
constexpr int NumStages = 2;
// This code section describes the epilogue part of the kernel, we use default
// value
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
ElementDst, // Data type of output matrix.
1, ElementAccumulator, // Data type of accumulator
ElementDst, // Data type of bias
ElementComputeEpilogue>; // Data type for alpha/beta in linear
// combination
using Convolution = cutlass::conv::device::Convolution<
ElementSrc, LayoutSrc, ElementFilter, LayoutFilter, ElementDst,
LayoutDst, ElementDst, LayoutDst, ElementAccumulator,
cutlass::conv::ConvType::kDepthwiseConvolution, MMAOp, SmArch,
ThreadblockShape, WarpShape, InstructionShape, EpilogueOp,
SwizzleThreadBlock, NumStages, 1, 1,
cutlass::conv::SpecialOptimizeDesc::NONE, cutlass::arch::OpMultiplyAdd,
cutlass::conv::ImplicitGemmMode::GEMM_TN>;
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
float alpha;
float beta;
bool benchmark;
std::string tag;
Options()
: help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 1),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(1000),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) {}
// Verify the problem size is compatible with the CUTLASS Convolution
// implementation.
bool valid() {
int const kAlignment = 1;
if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) /
conv_stride.row() +
1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) /
conv_stride.column() +
1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS / K
int64_t fmas =
output_size().product() *
int64_t(filter_size.h() * filter_size.w() * filter_size.c()) /
output_size().c();
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
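    // Worked example (numbers are illustrative): with the default sizes above,
    // input (1, 32, 32, 32) and filter (32, 3, 3, 1), output_size() is
    // (1, 32, 32, 32), so
    //   fmas = (1 * 32 * 32 * 32) * (3 * 3 * 1) / 32 = 9216
    // and gflops(t) = 2 * 9216 / 1e9 / t.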
};
#define CUTLASS_CHECK(status) \
{ \
cutlass::Status error = status; \
if (error != cutlass::Status::kSuccess) { \
std::cerr << "Got cutlass error: " \
<< cutlassGetStatusString(error) << " at: " << __LINE__ \
<< std::endl; \
exit(EXIT_FAILURE); \
} \
}
#define CUDA_CHECK(status) \
{ \
cudaError_t error = status; \
if (error != cudaSuccess) { \
std::cerr << "Got bad cuda status: " << cudaGetErrorString(error) \
<< " at line: " << __LINE__ << std::endl; \
exit(EXIT_FAILURE); \
} \
}
#define CHECK_CUDA(x) \
TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
torch::Tensor forward_fp16(torch::Tensor input, torch::Tensor weight) {
CHECK_INPUT(input);
CHECK_INPUT(weight);
auto output = torch::zeros_like(input);
Options options = Options();
options.update({input.size(0), input.size(2), input.size(3), input.size(1)},
{weight.size(0), weight.size(2), weight.size(3), 1});
cutlass::TensorRef<ElementSrc, LayoutSrc> d_src(
(ElementSrc*)input.data_ptr(),
LayoutSrc::packed(options.input_size));
cutlass::TensorRef<ElementFilter, LayoutFilter> d_filter(
(ElementFilter*)weight.data_ptr(),
LayoutFilter::packed(options.filter_size));
cutlass::TensorRef<typename Convolution::ElementDst,
typename Convolution::LayoutDst>
d_dst((ElementDst*)output.data_ptr(),
LayoutDst::packed(options.output_size()));
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
typename Convolution::Arguments arguments{
{options.input_size, options.filter_size, options.padding,
options.conv_stride, options.dilation, options.output_size(), mode,
split_k_slices, options.filter_size.n()},
d_src, // tensor_src.device_ref(),
d_filter, // tensor_filter.device_ref(),
NULL, // tensor_bias.device_ref(),
NULL, // tensor_z.device_ref(),
d_dst, // tensor_dst.device_ref(),
{ElementComputeEpilogue(options.alpha), ElementComputeEpilogue(0),
ElementComputeEpilogue(options.beta)}};
//
// Initialize CUTLASS Convolution
//
Convolution conv_op;
size_t workspace_size = conv_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
CUTLASS_CHECK(conv_op.initialize(arguments, workspace.get()));
//
// Launch initialized CUTLASS kernel
//
CUTLASS_CHECK(conv_op());
return output;
}
|
400f6094819934b38f3286c9eadea7a629c16bab.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <hip/hip_runtime.h>
#include "svm.h"
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
#define SUB_PROB 32
#define GRID_SIZE 1
void print_null(const char *s) {}
void exit_with_help()
{
printf(
"Usage: svm-train [options] training_set_file [model_file]\n"
"options:\n"
"-s svm_type : set type of SVM (default 0)\n"
" 0 -- C-SVC (multi-class classification)\n"
" 1 -- nu-SVC (multi-class classification)\n"
" 2 -- one-class SVM\n"
" 3 -- epsilon-SVR (regression)\n"
" 4 -- nu-SVR (regression)\n"
"-t kernel_type : set type of kernel function (default 2)\n"
" 0 -- linear: u'*v\n"
" 1 -- polynomial: (gamma*u'*v + coef0)^degree\n"
" 2 -- radial basis function: exp(-gamma*|u-v|^2)\n"
" 3 -- sigmoid: tanh(gamma*u'*v + coef0)\n"
" 4 -- precomputed kernel (kernel values in training_set_file)\n"
"-d degree : set degree in kernel function (default 3)\n"
"-g gamma : set gamma in kernel function (default 1/num_features)\n"
"-r coef0 : set coef0 in kernel function (default 0)\n"
"-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n"
"-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n"
"-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n"
"-m cachesize : set cache memory size in MB (default 100)\n"
"-e epsilon : set tolerance of termination criterion (default 0.001)\n"
"-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)\n"
"-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)\n"
"-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)\n"
"-v n: n-fold cross validation mode\n"
"-q : quiet mode (no outputs)\n"
);
exit(1);
}
void exit_input_error(int line_num)
{
fprintf(stderr,"Wrong input format at line %d\n", line_num);
exit(1);
}
int classes_index (double * d, int total, double y);
void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
void read_problem(const char *filename);
void do_cross_validation ();
void get_classes_count (double ** classes, int ** count, int * total);
void allocate_final_model ();
__global__ void gpu_train (int * fm_k, int * fm_l, int * fm_space, int * fm_mask);
__global__ void create_subProblems (double * classes, int * count, int * prob_flags, int total);
__global__ void delete_subProblems ();
__global__ void copy_final_model ();
__device__ struct svm_parameter d_param;
__device__ struct svm_problem subproblems[SUB_PROB];
__device__ struct svm_model * models[SUB_PROB];
__device__ __managed__ struct svm_model final_model;
__device__ __managed__ int fm_k, fm_l, fm_space, fm_mask;
__device__ __managed__ struct svm_problem prob; // set by read_problem
__device__ __managed__ struct svm_node * x_space;
struct svm_parameter param; // set by parse_command_line
int cross_validation;
int nr_fold;
static char *line = NULL;
static int max_line_len;
static char* readline(FILE *input)
{
int len;
if(fgets(line,max_line_len,input) == NULL)
return NULL;
while(strrchr(line,'\n') == NULL)
{
max_line_len *= 2;
line = (char *) realloc(line,max_line_len);
len = (int) strlen(line);
if(fgets(line+len,max_line_len-len,input) == NULL)
break;
}
return line;
}
__global__ void pass_param (struct svm_parameter * temp_param) {
d_param = *temp_param;
}
int main(int argc, char **argv)
{
char input_file_name[1024];
char model_file_name[1024];
const char *error_msg;
parse_command_line(argc, argv, input_file_name, model_file_name);
read_problem(input_file_name);
error_msg = svm_check_parameter(&prob,¶m);
struct svm_parameter * temp_param;
hipMalloc(&temp_param, sizeof(struct svm_parameter));
hipMemcpy(temp_param, ¶m, sizeof(struct svm_parameter), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( pass_param), dim3(1),dim3(1), 0, 0, temp_param);
hipDeviceSynchronize();
hipFree(temp_param);
hipError_t error = hipGetLastError();
if(error != hipSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After pass params", DEBUG, 0);
}
double * classes;
int * count, total;
get_classes_count(&classes, &count, &total);
double * d_classes;
int * d_count, * prob_flags;
hipMalloc(&d_classes, total * sizeof(double));
hipMalloc(&d_count, total * sizeof(int));
hipMallocManaged(&prob_flags, prob.l * sizeof(int));
hipMemcpy(d_classes, classes, total * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_count, count, total * sizeof(int), hipMemcpyHostToDevice);
hipMemset(prob_flags, 0, prob.l * sizeof(int));
hipLaunchKernelGGL(( create_subProblems), dim3(GRID_SIZE), dim3(SUB_PROB), 0, 0, d_classes, d_count, prob_flags, total);
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After create sub problems", DEBUG, 0);
}
hipFree(d_classes);
hipFree(d_count);
hipFree(prob_flags);
free(classes);
free(count);
if(error_msg)
{
fprintf(stderr,"ERROR: %s\n",error_msg);
exit(1);
}
if(cross_validation)
{
do_cross_validation();
}
else
{
printf("Training models...\n");
hipLaunchKernelGGL(( gpu_train), dim3(GRID_SIZE), dim3(SUB_PROB), 0, 0, &fm_k, &fm_l, &fm_space, &fm_mask);
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After Train!!", DEBUG, 0);
}
int max = fm_k * (fm_k - 1) / 2;
if (fm_k > max) max = fm_k;
if (fm_l > max) max = fm_l;
dim3 dimGrid((int)ceil(max / 1024.0));
dim3 dimBlock(1024);
allocate_final_model();
// printf("Copying final model to host...\n");
hipLaunchKernelGGL(( copy_final_model), dim3(dimGrid),dim3(dimBlock), 0, 0, );
hipDeviceSynchronize();
if(svm_save_model(model_file_name, &final_model))
{
fprintf(stderr, "can't save model to file %s\n", model_file_name);
exit(1);
}
cuda_destroy_model(&final_model);
error = hipGetLastError();
if(error != hipSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After save model", DEBUG, 0);
}
}
hipLaunchKernelGGL(( delete_subProblems), dim3(GRID_SIZE), dim3(SUB_PROB), 0, 0, );
hipDeviceSynchronize();
svm_destroy_param(¶m);
hipFree(prob.y);
hipFree(prob.x);
hipFree(x_space);
free(line);
return 0;
}
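// Hedged build note (nothing in this source states the build line, so the exact
// flags and file names below are assumptions): the kernels above call
// svm_train(), which is defined in a separate translation unit, so the device
// code must be compiled with relocatable device code and device-linked, e.g.
// roughly
//   hipcc -fgpu-rdc svm-train.cu svm.cu -o svm-train-gpu
// or, for the CUDA original,
//   nvcc -rdc=true svm-train.cu svm.cu -o svm-train-gpu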
void do_cross_validation()
{
int i;
int total_correct = 0;
double total_error = 0;
double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
double *target = Malloc(double,prob.l);
svm_cross_validation(&prob,¶m,nr_fold,target);
if(param.svm_type == EPSILON_SVR ||
param.svm_type == NU_SVR)
{
for(i=0;i<prob.l;i++)
{
double y = prob.y[i];
double v = target[i];
total_error += (v-y)*(v-y);
sumv += v;
sumy += y;
sumvv += v*v;
sumyy += y*y;
sumvy += v*y;
}
printf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
printf("Cross Validation Squared correlation coefficient = %g\n",
((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
);
}
else
{
for(i=0;i<prob.l;i++)
if(target[i] == prob.y[i])
++total_correct;
printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
}
free(target);
}
void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
{
int i;
void (*print_func)(const char*) = NULL; // default printing to stdout
// default values
param.svm_type = C_SVC;
param.kernel_type = RBF;
param.degree = 3;
param.gamma = 0; // 1/num_features
param.coef0 = 0;
param.nu = 0.5;
param.cache_size = 100;
param.C = 1;
param.eps = 1e-3;
param.p = 0.1;
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.weight_label = NULL;
param.weight = NULL;
cross_validation = 0;
// parse options
for(i=1;i<argc;i++)
{
if(argv[i][0] != '-') break;
if(++i>=argc)
exit_with_help();
switch(argv[i-1][1])
{
case 's':
param.svm_type = atoi(argv[i]);
break;
case 't':
param.kernel_type = atoi(argv[i]);
break;
case 'd':
param.degree = atoi(argv[i]);
break;
case 'g':
param.gamma = atof(argv[i]);
break;
case 'r':
param.coef0 = atof(argv[i]);
break;
case 'n':
param.nu = atof(argv[i]);
break;
case 'm':
param.cache_size = atof(argv[i]);
break;
case 'c':
param.C = atof(argv[i]);
break;
case 'e':
param.eps = atof(argv[i]);
break;
case 'p':
param.p = atof(argv[i]);
break;
case 'h':
param.shrinking = atoi(argv[i]);
break;
case 'b':
param.probability = atoi(argv[i]);
break;
case 'q':
print_func = &print_null;
i--;
break;
case 'v':
cross_validation = 1;
nr_fold = atoi(argv[i]);
if(nr_fold < 2)
{
fprintf(stderr,"n-fold cross validation: n must >= 2\n");
exit_with_help();
}
break;
case 'w':
++param.nr_weight;
param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
param.weight[param.nr_weight-1] = atof(argv[i]);
break;
default:
fprintf(stderr,"Unknown option: -%c\n", argv[i-1][1]);
exit_with_help();
}
}
svm_set_print_string_function(print_func);
// determine filenames
if(i>=argc)
exit_with_help();
strcpy(input_file_name, argv[i]);
if(i<argc-1)
sprintf(model_file_name,"%s.gpu",argv[i+1]);
else
{
char *p = strrchr(argv[i],'/');
if(p==NULL)
p = argv[i];
else
++p;
sprintf(model_file_name,"%s.model.gpu",p);
}
}
// read in a problem (in svmlight format)
void read_problem(const char *filename)
{
int max_index, inst_max_index, i;
size_t elements, j;
FILE *fp = fopen(filename,"r");
char *endptr;
char *idx, *val, *label;
if(fp == NULL)
{
fprintf(stderr,"can't open input file %s\n",filename);
exit(1);
}
prob.l = 0;
elements = 0;
max_line_len = 1024;
line = Malloc(char,max_line_len);
while(readline(fp)!=NULL)
{
char *p = strtok(line," \t"); // label
// features
while(1)
{
p = strtok(NULL," \t");
if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
break;
++elements;
}
++elements;
++prob.l;
}
rewind(fp);
hipMallocManaged(&prob.y, prob.l * sizeof(double));
hipMallocManaged(&prob.x, prob.l * sizeof(struct svm_node*));
hipMallocManaged(&x_space, elements * sizeof(struct svm_node));
max_index = 0;
j=0;
for(i=0;i<prob.l;i++)
{
inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
readline(fp);
prob.x[i] = &x_space[j];
label = strtok(line," \t\n");
if(label == NULL) // empty line
exit_input_error(i+1);
prob.y[i] = strtod(label,&endptr);
if(endptr == label || *endptr != '\0')
exit_input_error(i+1);
while(1)
{
idx = strtok(NULL,":");
val = strtok(NULL," \t");
if(val == NULL)
break;
errno = 0;
x_space[j].index = (int) strtol(idx,&endptr,10);
if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
exit_input_error(i+1);
else
inst_max_index = x_space[j].index;
errno = 0;
x_space[j].value = strtod(val,&endptr);
if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
exit_input_error(i+1);
++j;
}
if(inst_max_index > max_index)
max_index = inst_max_index;
x_space[j++].index = -1;
}
if(param.gamma == 0 && max_index > 0)
param.gamma = 1.0/max_index;
if(param.kernel_type == PRECOMPUTED)
for(i=0;i<prob.l;i++)
{
if (prob.x[i][0].index != 0)
{
fprintf(stderr,"Wrong input format: first column must be 0:sample_serial_number\n");
exit(1);
}
if ((int)prob.x[i][0].value <= 0 || (int)prob.x[i][0].value > max_index)
{
fprintf(stderr,"Wrong input format: sample_serial_number out of range\n");
exit(1);
}
}
fclose(fp);
}
int classes_index (double * d, int total, double y) {
for (int i = 0; i < total; i++) {
if (d[i] == y) return i;
}
return -1;
}
// number of classes (total) and count of examples of each class (count)
void get_classes_count (double ** classes_ret, int ** count_ret, int * total_ret) {
int size = 16;
double * classes = Malloc(double, size);
int * count = (int *) calloc(size, sizeof(int));
int total = 0;
for (int j = 0; j < prob.l; j++) {
int i = classes_index(classes, total, prob.y[j]);
if (i != -1) {
count[i]++;
} else {
classes[total] = prob.y[j];
			count[total] = 1;	// first example of the new class; realloc below does not zero new slots
total++;
if (total == size) {
size *= 2;
classes = (double *) realloc(classes, size * sizeof(double));
count = (int *) realloc(count, size * sizeof(int));
}
}
}
*classes_ret = classes;
*count_ret = count;
*total_ret = total;
}
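// Note: classes and count are heap-allocated here and released by the caller
// (see the free(classes)/free(count) calls in main).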
__global__ void create_subProblems (double * classes, int * count, int * prob_flags, int total) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// number of examples per class
int * count_per_problem = Malloc(int, total);
for (int j = 0; j < total; j++) {
int r = count[j] % SUB_PROB;
count_per_problem[j] = count[j] / SUB_PROB;
if (i < r) count_per_problem[j]++;
}
// allocate subproblem
int sum = 0;
for (int j = 0; j < total; j++) {
sum += count_per_problem[j];
}
subproblems[i].l = sum;
subproblems[i].y = Malloc(double, sum);
subproblems[i].x = Malloc(struct svm_node*, sum);
// populate subproblem
int start = i, counter = 0;
while (counter != sum) {
if (prob_flags[start] != 0) {
start = (start + 1) % prob.l;
continue;
}
atomicAdd(&prob_flags[start], 1);
int class_index = -1;
for (int j = 0; j < total; j++) {
if (classes[j] == prob.y[start]) {
class_index = j;
break;
}
}
if (count_per_problem[class_index] != 0) {
count_per_problem[class_index]--;
subproblems[i].y[counter] = prob.y[start];
subproblems[i].x[counter] = prob.x[start];
counter++;
} else {
atomicSub(&prob_flags[start], 1);
}
start = (start + 1) % prob.l;
}
free(count_per_problem);
// allocate subproblem
// int l = prob.l / SUB_PROB;
// subproblems[i].l = l;
// subproblems[i].y = Malloc(double, l);
// subproblems[i].x = Malloc(struct svm_node*, l);
// populate subproblems
// for (int j = i * l, k = 0; k < l; j++, k++) {
// subproblems[i].y[k] = prob.y[j];
// subproblems[i].x[k] = prob.x[j];
// }
}
__global__ void delete_subProblems () {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (subproblems[i].y) {
free(subproblems[i].y);
free(subproblems[i].x);
}
}
__global__ void gpu_train (int * fm_k, int * fm_l, int * fm_space, int * fm_mask) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (int k = 1; k <= SUB_PROB; k*=2) {
if (i < SUB_PROB/k) {
models[i] = svm_train(&subproblems[i], &d_param);
}
__syncthreads();
if (i < SUB_PROB/(2*k)) {
int offset = SUB_PROB/(2*k);
// if (models[i]->l + models[i+offset]->l != 0) {
subproblems[i].l = models[i]->l + models[i+offset]->l;
free(subproblems[i].y);
free(subproblems[i].x);
subproblems[i].y = Malloc(double, subproblems[i].l);
subproblems[i].x = Malloc(struct svm_node*, subproblems[i].l);
int index1 = 0;
for (int cl = 0; cl < models[i]->nr_class; cl++) {
for (int j = 0; j < models[i]->nSV[cl]; j++) {
subproblems[i].y[index1] = models[i]->label[cl];
subproblems[i].x[index1] = models[i]->SV[index1];
index1++;
}
}
int index2 = 0;
for (int cl = 0; cl < models[i+offset]->nr_class; cl++) {
for (int j = 0; j < models[i+offset]->nSV[cl]; j++) {
subproblems[i].y[index1] = models[i+offset]->label[cl];
subproblems[i].x[index1] = models[i+offset]->SV[index2];
index1++;
index2++;
}
}
// }
}
__syncthreads();
int start = (k != SUB_PROB) ? 0 : 1;
if (i >= start && i < SUB_PROB/k) {
svm_free_and_destroy_model(&models[i]);
}
__syncthreads();
}
if (i == 0) {
int space, max = 0;
		const svm_node * p;
		for (int j = 0; j < models[0]->l; j++) {
			p = models[0]->SV[j];	// restart at each support vector; otherwise only SV[0] is measured
			space = 0;
while (p->index != -1) {
space++;
p++;
}
if (space > max) max = space;
}
int mask = 0;
if (models[0]->label) mask += 1;
if (models[0]->probA) mask += 2;
if (models[0]->probB) mask += 4;
*fm_k = models[0]->nr_class;
*fm_l = models[0]->l;
*fm_space = max + 1;
*fm_mask = mask;
}
}
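// Descriptive note on gpu_train above: with SUB_PROB = 32 the loop body runs six
// times (k = 1, 2, 4, 8, 16, 32). Each pass trains SUB_PROB/k sub-models, and the
// first five passes rebuild subproblem i from the support vectors of models i and
// i + SUB_PROB/(2k), halving the number of models like a binary reduction; the
// final pass trains the single surviving models[0]. Thread 0 then exports its
// dimensions: fm_k = number of classes, fm_l = number of support vectors,
// fm_space = longest support-vector length + 1 (for the index == -1 terminator),
// fm_mask = bit flags for the optional label (1), probA (2) and probB (4) arrays.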
void allocate_final_model () {
int fm_t = fm_k * (fm_k - 1) / 2;
struct svm_node ** SV;
double ** sv_coef, * rho, * probA = NULL, * probB = NULL;
int * sv_indices, * label = NULL, * nSV;
hipMallocManaged(&SV, fm_l * sizeof(struct svm_node*));
for (int i = 0; i < fm_l; i++)
hipMallocManaged(&SV[i], fm_space * sizeof(struct svm_node));
hipMallocManaged(&sv_coef, (fm_k-1) * sizeof(double*));
for (int i = 0; i < fm_k - 1; i++)
hipMallocManaged(&sv_coef[i], fm_l * sizeof(double));
hipMallocManaged(&rho, fm_t * sizeof(double));
if (fm_mask & 2)
hipMallocManaged(&probA, fm_t * sizeof(double));
if (fm_mask & 4)
hipMallocManaged(&probB, fm_t * sizeof(double));
hipMallocManaged(&sv_indices, fm_l * sizeof(int));
if (fm_mask & 1)
hipMallocManaged(&label, fm_k * sizeof(int));
hipMallocManaged(&nSV, fm_k * sizeof(int));
final_model.SV = SV;
final_model.sv_coef = sv_coef;
final_model.rho = rho;
final_model.probA = probA;
final_model.probB = probB;
final_model.sv_indices = sv_indices;
final_model.label = label;
final_model.nSV = nSV;
final_model.param.svm_type = param.svm_type;
final_model.param.kernel_type = param.kernel_type;
final_model.param.degree = param.degree;
final_model.param.gamma = param.gamma;
final_model.param.coef0 = param.coef0;
final_model.nr_class = fm_k;
final_model.l = fm_l;
}
__global__ void copy_final_model() {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < fm_l) {
for(int j = 0; j< fm_k - 1; j++)
final_model.sv_coef[j][i] = models[0]->sv_coef[j][i];
const svm_node *src = models[0]->SV[i];
svm_node *dst = final_model.SV[i];
while(src->index != -1) {
dst->index = src->index;
dst->value = src->value;
src++;
dst++;
}
dst->index = -1;
final_model.sv_indices[i] = models[0]->sv_indices[i];
}
if (i < fm_k) {
if (models[0]->label)
final_model.label[i] = models[0]->label[i];
if (models[0]->nSV)
final_model.nSV[i] = models[0]->nSV[i];
}
if (i < fm_k*(fm_k-1)/2) {
final_model.rho[i] = models[0]->rho[i];
if (models[0]->probA)
final_model.probA[i] = models[0]->probA[i];
if (models[0]->probB)
final_model.probB[i] = models[0]->probB[i];
}
if (i == 0) {
svm_free_and_destroy_model(&models[0]);
final_model.free_sv = 1;
}
}
| 400f6094819934b38f3286c9eadea7a629c16bab.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <cuda.h>
#include "svm.h"
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
#define SUB_PROB 32
#define GRID_SIZE 1
void print_null(const char *s) {}
void exit_with_help()
{
printf(
"Usage: svm-train [options] training_set_file [model_file]\n"
"options:\n"
"-s svm_type : set type of SVM (default 0)\n"
" 0 -- C-SVC (multi-class classification)\n"
" 1 -- nu-SVC (multi-class classification)\n"
" 2 -- one-class SVM\n"
" 3 -- epsilon-SVR (regression)\n"
" 4 -- nu-SVR (regression)\n"
"-t kernel_type : set type of kernel function (default 2)\n"
" 0 -- linear: u'*v\n"
" 1 -- polynomial: (gamma*u'*v + coef0)^degree\n"
" 2 -- radial basis function: exp(-gamma*|u-v|^2)\n"
" 3 -- sigmoid: tanh(gamma*u'*v + coef0)\n"
" 4 -- precomputed kernel (kernel values in training_set_file)\n"
"-d degree : set degree in kernel function (default 3)\n"
"-g gamma : set gamma in kernel function (default 1/num_features)\n"
"-r coef0 : set coef0 in kernel function (default 0)\n"
"-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n"
"-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n"
"-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n"
"-m cachesize : set cache memory size in MB (default 100)\n"
"-e epsilon : set tolerance of termination criterion (default 0.001)\n"
"-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)\n"
"-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)\n"
"-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)\n"
"-v n: n-fold cross validation mode\n"
"-q : quiet mode (no outputs)\n"
);
exit(1);
}
void exit_input_error(int line_num)
{
fprintf(stderr,"Wrong input format at line %d\n", line_num);
exit(1);
}
int classes_index (double * d, int total, double y);
void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
void read_problem(const char *filename);
void do_cross_validation ();
void get_classes_count (double ** classes, int ** count, int * total);
void allocate_final_model ();
__global__ void gpu_train (int * fm_k, int * fm_l, int * fm_space, int * fm_mask);
__global__ void create_subProblems (double * classes, int * count, int * prob_flags, int total);
__global__ void delete_subProblems ();
__global__ void copy_final_model ();
__device__ struct svm_parameter d_param;
__device__ struct svm_problem subproblems[SUB_PROB];
__device__ struct svm_model * models[SUB_PROB];
__device__ __managed__ struct svm_model final_model;
__device__ __managed__ int fm_k, fm_l, fm_space, fm_mask;
__device__ __managed__ struct svm_problem prob; // set by read_problem
__device__ __managed__ struct svm_node * x_space;
struct svm_parameter param; // set by parse_command_line
int cross_validation;
int nr_fold;
static char *line = NULL;
static int max_line_len;
static char* readline(FILE *input)
{
int len;
if(fgets(line,max_line_len,input) == NULL)
return NULL;
while(strrchr(line,'\n') == NULL)
{
max_line_len *= 2;
line = (char *) realloc(line,max_line_len);
len = (int) strlen(line);
if(fgets(line+len,max_line_len-len,input) == NULL)
break;
}
return line;
}
__global__ void pass_param (struct svm_parameter * temp_param) {
d_param = *temp_param;
}
int main(int argc, char **argv)
{
char input_file_name[1024];
char model_file_name[1024];
const char *error_msg;
parse_command_line(argc, argv, input_file_name, model_file_name);
read_problem(input_file_name);
error_msg = svm_check_parameter(&prob,¶m);
struct svm_parameter * temp_param;
cudaMalloc(&temp_param, sizeof(struct svm_parameter));
cudaMemcpy(temp_param, ¶m, sizeof(struct svm_parameter), cudaMemcpyHostToDevice);
pass_param<<<1,1>>>(temp_param);
cudaDeviceSynchronize();
cudaFree(temp_param);
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After pass params", DEBUG, 0);
}
double * classes;
int * count, total;
get_classes_count(&classes, &count, &total);
double * d_classes;
int * d_count, * prob_flags;
cudaMalloc(&d_classes, total * sizeof(double));
cudaMalloc(&d_count, total * sizeof(int));
cudaMallocManaged(&prob_flags, prob.l * sizeof(int));
cudaMemcpy(d_classes, classes, total * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_count, count, total * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(prob_flags, 0, prob.l * sizeof(int));
create_subProblems<<<GRID_SIZE, SUB_PROB>>>(d_classes, d_count, prob_flags, total);
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After create sub problems", DEBUG, 0);
}
cudaFree(d_classes);
cudaFree(d_count);
cudaFree(prob_flags);
free(classes);
free(count);
if(error_msg)
{
fprintf(stderr,"ERROR: %s\n",error_msg);
exit(1);
}
if(cross_validation)
{
do_cross_validation();
}
else
{
printf("Training models...\n");
gpu_train<<<GRID_SIZE, SUB_PROB>>>(&fm_k, &fm_l, &fm_space, &fm_mask);
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After Train!!", DEBUG, 0);
}
int max = fm_k * (fm_k - 1) / 2;
if (fm_k > max) max = fm_k;
if (fm_l > max) max = fm_l;
dim3 dimGrid((int)ceil(max / 1024.0));
dim3 dimBlock(1024);
allocate_final_model();
// printf("Copying final model to host...\n");
copy_final_model<<<dimGrid,dimBlock>>>();
cudaDeviceSynchronize();
if(svm_save_model(model_file_name, &final_model))
{
fprintf(stderr, "can't save model to file %s\n", model_file_name);
exit(1);
}
cuda_destroy_model(&final_model);
error = cudaGetLastError();
if(error != cudaSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
printf("Exiting program...\n");
exit(-1);
} else {
debug("After save model", DEBUG, 0);
}
}
delete_subProblems<<<GRID_SIZE, SUB_PROB>>>();
cudaDeviceSynchronize();
svm_destroy_param(¶m);
cudaFree(prob.y);
cudaFree(prob.x);
cudaFree(x_space);
free(line);
return 0;
}
void do_cross_validation()
{
int i;
int total_correct = 0;
double total_error = 0;
double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
double *target = Malloc(double,prob.l);
svm_cross_validation(&prob,¶m,nr_fold,target);
if(param.svm_type == EPSILON_SVR ||
param.svm_type == NU_SVR)
{
for(i=0;i<prob.l;i++)
{
double y = prob.y[i];
double v = target[i];
total_error += (v-y)*(v-y);
sumv += v;
sumy += y;
sumvv += v*v;
sumyy += y*y;
sumvy += v*y;
}
printf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
printf("Cross Validation Squared correlation coefficient = %g\n",
((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
);
}
else
{
for(i=0;i<prob.l;i++)
if(target[i] == prob.y[i])
++total_correct;
printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
}
free(target);
}
void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
{
int i;
void (*print_func)(const char*) = NULL; // default printing to stdout
// default values
param.svm_type = C_SVC;
param.kernel_type = RBF;
param.degree = 3;
param.gamma = 0; // 1/num_features
param.coef0 = 0;
param.nu = 0.5;
param.cache_size = 100;
param.C = 1;
param.eps = 1e-3;
param.p = 0.1;
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.weight_label = NULL;
param.weight = NULL;
cross_validation = 0;
// parse options
for(i=1;i<argc;i++)
{
if(argv[i][0] != '-') break;
if(++i>=argc)
exit_with_help();
switch(argv[i-1][1])
{
case 's':
param.svm_type = atoi(argv[i]);
break;
case 't':
param.kernel_type = atoi(argv[i]);
break;
case 'd':
param.degree = atoi(argv[i]);
break;
case 'g':
param.gamma = atof(argv[i]);
break;
case 'r':
param.coef0 = atof(argv[i]);
break;
case 'n':
param.nu = atof(argv[i]);
break;
case 'm':
param.cache_size = atof(argv[i]);
break;
case 'c':
param.C = atof(argv[i]);
break;
case 'e':
param.eps = atof(argv[i]);
break;
case 'p':
param.p = atof(argv[i]);
break;
case 'h':
param.shrinking = atoi(argv[i]);
break;
case 'b':
param.probability = atoi(argv[i]);
break;
case 'q':
print_func = &print_null;
i--;
break;
case 'v':
cross_validation = 1;
nr_fold = atoi(argv[i]);
if(nr_fold < 2)
{
fprintf(stderr,"n-fold cross validation: n must >= 2\n");
exit_with_help();
}
break;
case 'w':
++param.nr_weight;
param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
param.weight[param.nr_weight-1] = atof(argv[i]);
break;
default:
fprintf(stderr,"Unknown option: -%c\n", argv[i-1][1]);
exit_with_help();
}
}
svm_set_print_string_function(print_func);
// determine filenames
if(i>=argc)
exit_with_help();
strcpy(input_file_name, argv[i]);
if(i<argc-1)
sprintf(model_file_name,"%s.gpu",argv[i+1]);
else
{
char *p = strrchr(argv[i],'/');
if(p==NULL)
p = argv[i];
else
++p;
sprintf(model_file_name,"%s.model.gpu",p);
}
}
// read in a problem (in svmlight format)
void read_problem(const char *filename)
{
int max_index, inst_max_index, i;
size_t elements, j;
FILE *fp = fopen(filename,"r");
char *endptr;
char *idx, *val, *label;
if(fp == NULL)
{
fprintf(stderr,"can't open input file %s\n",filename);
exit(1);
}
prob.l = 0;
elements = 0;
max_line_len = 1024;
line = Malloc(char,max_line_len);
while(readline(fp)!=NULL)
{
char *p = strtok(line," \t"); // label
// features
while(1)
{
p = strtok(NULL," \t");
if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
break;
++elements;
}
++elements;
++prob.l;
}
rewind(fp);
cudaMallocManaged(&prob.y, prob.l * sizeof(double));
cudaMallocManaged(&prob.x, prob.l * sizeof(struct svm_node*));
cudaMallocManaged(&x_space, elements * sizeof(struct svm_node));
max_index = 0;
j=0;
for(i=0;i<prob.l;i++)
{
inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
readline(fp);
prob.x[i] = &x_space[j];
label = strtok(line," \t\n");
if(label == NULL) // empty line
exit_input_error(i+1);
prob.y[i] = strtod(label,&endptr);
if(endptr == label || *endptr != '\0')
exit_input_error(i+1);
while(1)
{
idx = strtok(NULL,":");
val = strtok(NULL," \t");
if(val == NULL)
break;
errno = 0;
x_space[j].index = (int) strtol(idx,&endptr,10);
if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
exit_input_error(i+1);
else
inst_max_index = x_space[j].index;
errno = 0;
x_space[j].value = strtod(val,&endptr);
if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
exit_input_error(i+1);
++j;
}
if(inst_max_index > max_index)
max_index = inst_max_index;
x_space[j++].index = -1;
}
if(param.gamma == 0 && max_index > 0)
param.gamma = 1.0/max_index;
if(param.kernel_type == PRECOMPUTED)
for(i=0;i<prob.l;i++)
{
if (prob.x[i][0].index != 0)
{
fprintf(stderr,"Wrong input format: first column must be 0:sample_serial_number\n");
exit(1);
}
if ((int)prob.x[i][0].value <= 0 || (int)prob.x[i][0].value > max_index)
{
fprintf(stderr,"Wrong input format: sample_serial_number out of range\n");
exit(1);
}
}
fclose(fp);
}
int classes_index (double * d, int total, double y) {
for (int i = 0; i < total; i++) {
if (d[i] == y) return i;
}
return -1;
}
// number of classes (total) and count of examples of each class (count)
void get_classes_count (double ** classes_ret, int ** count_ret, int * total_ret) {
int size = 16;
double * classes = Malloc(double, size);
int * count = (int *) calloc(size, sizeof(int));
int total = 0;
for (int j = 0; j < prob.l; j++) {
int i = classes_index(classes, total, prob.y[j]);
if (i != -1) {
count[i]++;
} else {
classes[total] = prob.y[j];
			count[total] = 1;	// first example of the new class; realloc below does not zero new slots
total++;
if (total == size) {
size *= 2;
classes = (double *) realloc(classes, size * sizeof(double));
count = (int *) realloc(count, size * sizeof(int));
}
}
}
*classes_ret = classes;
*count_ret = count;
*total_ret = total;
}
__global__ void create_subProblems (double * classes, int * count, int * prob_flags, int total) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// number of examples per class
int * count_per_problem = Malloc(int, total);
for (int j = 0; j < total; j++) {
int r = count[j] % SUB_PROB;
count_per_problem[j] = count[j] / SUB_PROB;
if (i < r) count_per_problem[j]++;
}
// allocate subproblem
int sum = 0;
for (int j = 0; j < total; j++) {
sum += count_per_problem[j];
}
subproblems[i].l = sum;
subproblems[i].y = Malloc(double, sum);
subproblems[i].x = Malloc(struct svm_node*, sum);
// populate subproblem
int start = i, counter = 0;
while (counter != sum) {
if (prob_flags[start] != 0) {
start = (start + 1) % prob.l;
continue;
}
atomicAdd(&prob_flags[start], 1);
int class_index = -1;
for (int j = 0; j < total; j++) {
if (classes[j] == prob.y[start]) {
class_index = j;
break;
}
}
if (count_per_problem[class_index] != 0) {
count_per_problem[class_index]--;
subproblems[i].y[counter] = prob.y[start];
subproblems[i].x[counter] = prob.x[start];
counter++;
} else {
atomicSub(&prob_flags[start], 1);
}
start = (start + 1) % prob.l;
}
free(count_per_problem);
// allocate subproblem
// int l = prob.l / SUB_PROB;
// subproblems[i].l = l;
// subproblems[i].y = Malloc(double, l);
// subproblems[i].x = Malloc(struct svm_node*, l);
// populate subproblems
// for (int j = i * l, k = 0; k < l; j++, k++) {
// subproblems[i].y[k] = prob.y[j];
// subproblems[i].x[k] = prob.x[j];
// }
}
__global__ void delete_subProblems () {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (subproblems[i].y) {
free(subproblems[i].y);
free(subproblems[i].x);
}
}
__global__ void gpu_train (int * fm_k, int * fm_l, int * fm_space, int * fm_mask) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (int k = 1; k <= SUB_PROB; k*=2) {
if (i < SUB_PROB/k) {
models[i] = svm_train(&subproblems[i], &d_param);
}
__syncthreads();
if (i < SUB_PROB/(2*k)) {
int offset = SUB_PROB/(2*k);
// if (models[i]->l + models[i+offset]->l != 0) {
subproblems[i].l = models[i]->l + models[i+offset]->l;
free(subproblems[i].y);
free(subproblems[i].x);
subproblems[i].y = Malloc(double, subproblems[i].l);
subproblems[i].x = Malloc(struct svm_node*, subproblems[i].l);
int index1 = 0;
for (int cl = 0; cl < models[i]->nr_class; cl++) {
for (int j = 0; j < models[i]->nSV[cl]; j++) {
subproblems[i].y[index1] = models[i]->label[cl];
subproblems[i].x[index1] = models[i]->SV[index1];
index1++;
}
}
int index2 = 0;
for (int cl = 0; cl < models[i+offset]->nr_class; cl++) {
for (int j = 0; j < models[i+offset]->nSV[cl]; j++) {
subproblems[i].y[index1] = models[i+offset]->label[cl];
subproblems[i].x[index1] = models[i+offset]->SV[index2];
index1++;
index2++;
}
}
// }
}
__syncthreads();
int start = (k != SUB_PROB) ? 0 : 1;
if (i >= start && i < SUB_PROB/k) {
svm_free_and_destroy_model(&models[i]);
}
__syncthreads();
}
if (i == 0) {
int space, max = 0;
		const svm_node * p;
		for (int j = 0; j < models[0]->l; j++) {
			p = models[0]->SV[j];	// restart at each support vector; otherwise only SV[0] is measured
			space = 0;
while (p->index != -1) {
space++;
p++;
}
if (space > max) max = space;
}
int mask = 0;
if (models[0]->label) mask += 1;
if (models[0]->probA) mask += 2;
if (models[0]->probB) mask += 4;
*fm_k = models[0]->nr_class;
*fm_l = models[0]->l;
*fm_space = max + 1;
*fm_mask = mask;
}
}
void allocate_final_model () {
int fm_t = fm_k * (fm_k - 1) / 2;
struct svm_node ** SV;
double ** sv_coef, * rho, * probA = NULL, * probB = NULL;
int * sv_indices, * label = NULL, * nSV;
cudaMallocManaged(&SV, fm_l * sizeof(struct svm_node*));
for (int i = 0; i < fm_l; i++)
cudaMallocManaged(&SV[i], fm_space * sizeof(struct svm_node));
cudaMallocManaged(&sv_coef, (fm_k-1) * sizeof(double*));
for (int i = 0; i < fm_k - 1; i++)
cudaMallocManaged(&sv_coef[i], fm_l * sizeof(double));
cudaMallocManaged(&rho, fm_t * sizeof(double));
if (fm_mask & 2)
cudaMallocManaged(&probA, fm_t * sizeof(double));
if (fm_mask & 4)
cudaMallocManaged(&probB, fm_t * sizeof(double));
cudaMallocManaged(&sv_indices, fm_l * sizeof(int));
if (fm_mask & 1)
cudaMallocManaged(&label, fm_k * sizeof(int));
cudaMallocManaged(&nSV, fm_k * sizeof(int));
final_model.SV = SV;
final_model.sv_coef = sv_coef;
final_model.rho = rho;
final_model.probA = probA;
final_model.probB = probB;
final_model.sv_indices = sv_indices;
final_model.label = label;
final_model.nSV = nSV;
final_model.param.svm_type = param.svm_type;
final_model.param.kernel_type = param.kernel_type;
final_model.param.degree = param.degree;
final_model.param.gamma = param.gamma;
final_model.param.coef0 = param.coef0;
final_model.nr_class = fm_k;
final_model.l = fm_l;
}
__global__ void copy_final_model() {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < fm_l) {
for(int j = 0; j< fm_k - 1; j++)
final_model.sv_coef[j][i] = models[0]->sv_coef[j][i];
const svm_node *src = models[0]->SV[i];
svm_node *dst = final_model.SV[i];
while(src->index != -1) {
dst->index = src->index;
dst->value = src->value;
src++;
dst++;
}
dst->index = -1;
final_model.sv_indices[i] = models[0]->sv_indices[i];
}
if (i < fm_k) {
if (models[0]->label)
final_model.label[i] = models[0]->label[i];
if (models[0]->nSV)
final_model.nSV[i] = models[0]->nSV[i];
}
if (i < fm_k*(fm_k-1)/2) {
final_model.rho[i] = models[0]->rho[i];
if (models[0]->probA)
final_model.probA[i] = models[0]->probA[i];
if (models[0]->probB)
final_model.probB[i] = models[0]->probB[i];
}
if (i == 0) {
svm_free_and_destroy_model(&models[0]);
final_model.free_sv = 1;
}
}
|
29a9a995ced7435eb9fcb067bd6a431695a41abc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
	float Value1 = 0;
	float Value2 = 0;
	float Value3 = 0;
	float Value = 0;	// zero-initialized: Value1/Value2 are read below even by threads that skip the if-branch
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=11)){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
	hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;	// cast needed: integer division would make almost every entry 0
}
}
| 29a9a995ced7435eb9fcb067bd6a431695a41abc.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
	float Value1 = 0;
	float Value2 = 0;
	float Value3 = 0;
	float Value = 0;	// zero-initialized: Value1/Value2 are read below even by threads that skip the if-branch
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=11)){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;	// cast needed: integer division would make almost every entry 0
}
}
|
304ddb7c859a1211b3cfe7924aab43972521a3b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer.h"
#include "boost/date_time/posix_time/posix_time.hpp"
// --------------------------------------------------------------------------
// kernel code
// convert_bottom_{gpu, cpu}
// --------------------------------------------------------------------------
// convert bottom3d (C x H x W)
// -> bottom5d (C x kernel_h x kernel_w x H5 x W5)
// given (c, h5, w5),
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = (-pad_h + stride_h * h5) + kh, kh = { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w5) + kw, kw = { 0, 1, ..., kernel_w - 1 }
// if !(0 <= h < H) or !(0 <= w < W), assign 0
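// Worked example (numbers are illustrative): with kernel_h = kernel_w = 3,
// pad_h = pad_w = 1, stride_h = stride_w = 1 and H = W = 5, the output grid is
// H5 = W5 = 1 + (5 + 2*1 - 3)/1 = 5. For (c, h5, w5) = (0, 0, 0) the patch
// origin is (h, w) = (-1, -1), so the kh = 0 row and kw = 0 column fall in the
// zero-padded border and are written as 0, while the remaining 2x2 sub-patch
// copies bottom3d[0][0..1][0..1].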
#ifndef GPU
void convert_bottom_cpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
const int kernel_size = kernel_h * kernel_w;
const int HW = H * W;
real* p_bottom5d_hwc = bottom5d;
//memset(p_bottom5d_hwc, 0, sizeof(real) * kernel_size * H5 * W5 * C);
for (int h5 = 0; h5 < H5; ++h5) {
const int h_start = h5 * stride_h - pad_h;
//const int kh_start = MAX(0, -h_start);
//const int kh_end = MIN(kernel_h, H - h_start);
for (int w5 = 0; w5 < W5; ++w5) {
const int w_start = w5 * stride_w - pad_w;
//const int kw_start = MAX(0, -w_start);
//const int kw_end = MIN(kernel_w, W - w_start);
const real* const p_bottom3d_hw = bottom3d + h_start * W + w_start;
for (int c = 0; c < C; ++c) {
// (h_start, w_start): upper-left corner of bottom3d's kernel patch
const real* const p_bottom3d = p_bottom3d_hw + c * HW;
real* const p_bottom5d = p_bottom5d_hwc;
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = h_start + kh, kh = {0, 1, ..., kernel_h - 1}
// w = w_start + kw, kw = {0, 1, ..., kernel_w - 1}
// if (h, w) is in a zero-padded region, assign 0
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h = h_start + kh;
const int w = w_start + kw;
p_bottom5d[(kh * kernel_w + kw)] =
(h >= 0 && h < H && w >= 0 && w < W) ? p_bottom3d[kh * W + kw] : 0;
}
}
/*
for (int kh = kh_start; kh < kh_end; ++kh) {
for (int kw = kw_start; kw < kw_end; ++kw) {
p_bottom5d[kh * kernel_w + kw] = p_bottom3d[kh * W + kw];
}
}
*/
p_bottom5d_hwc += kernel_size;
} // endfor c
} // endfor w5
} // endfor h5
}
#else
void convert_bottom_cpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
for (int c = 0; c < C; ++c) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
// pointer to bottom5d[c][kh][kw][h5 = 0][w5 = 0]
real* const p_bottom5d = bottom5d +
((c * kernel_h + kh) * kernel_w + kw) * H5 * W5;
int h = -pad_h + kh;
int h5 = 0;
// for h < 0 (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h < 0; h += stride_h, ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for 0 <= h < H (data region)
for (; h < H && h5 < H5; h += stride_h, ++h5) {
// pointer to bottom3d[c][h][w = 0]
int w = -pad_w + kw;
int w5 = 0;
// for w < 0 (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w < 0; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
// for 0 <= w < W (data region):
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
for (; w < W && w5 < W5; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = bottom3d[(c * H + h) * W + w];
}
// for w >= W (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for h >= H (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h5 < H5; ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
} // endfor kw
} // endfor kh
} // endfor c
}
#endif
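// Note on the two CPU variants above: the #ifndef GPU version writes temp in
// (h5, w5, c, kh, kw) order, i.e. an (H5 * W5) x (C * kernel_h * kernel_w)
// matrix, whereas the #else version writes the (C * kernel_h * kernel_w) x
// (H5 * W5) layout documented in the header comment and assumed by the GPU
// path. This is why the CBLAS call in conv_forward transposes the temp operand
// while the cuBLAS call does not.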
// --------------------------------------------------------------------------
// layer operator code
// conv_forward
// --------------------------------------------------------------------------
// convolution: bottom -> top
// G: number of groups
// bottom: (G * C) x H x W
// top: (G * C') x H' x W'
// weight: G x C' x C x kernel_h x kernel_w
// bias: (G * C') x 1
// temp: (G * C * kernel_h * kernel_w) x (H' * W') array
// const: 1 x (H' * W') array, const[i] = 1 for all i
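// Worked shape example (numbers are illustrative, not from the original code):
// with G = 1, C = 16, C' = 32, H = W = 32, a 3x3 kernel, pad 1 and stride 1,
//   H' = W' = 1 + (32 + 2*1 - 3)/1 = 32,
// so temp is (1*16*3*3) x (32*32) = 144 x 1024, weight[0] is 32 x 144, and
// top[0] = weight[0] * temp is 32 x 1024, i.e. C' x H' x W' = 32 x 32 x 32
// after reshaping, matching the top shape set inside the function below.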
void conv_forward(const Tensor* const bottom3d,
Tensor* const top3d,
const Tensor* const weight5d,
const Tensor* const bias1d,
real* const temp_data,
const real* const const_data,
const LayerOption* const option)
{
// weight shape: G x C' x C x kernel_h x kernel_w
const int num_groups = weight5d->shape[0][0]; // G
const int top_C = weight5d->shape[0][1]; // C'
const int bottom_C = weight5d->shape[0][2]; // C
const int kernel_h = weight5d->shape[0][3];
const int kernel_w = weight5d->shape[0][4];
// padding size & stride size
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
real* p_top_item = top3d->data;
real* p_temp_data = temp_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// set top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// convert bottom shape
// (G * C) x H x W -> (G * C * kernel_h * kernel_w) x (H' * W')
{
float time = 0;
clock_t tick0 = clock();
#ifdef GPU
// one thread computes "kernel_h * kernel_w" entries in top
const int num_threads = num_groups * bottom_C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( convert_bottom_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#else
convert_bottom_cpu(
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#endif
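      // Note: in the GPU build the convert_bottom_gpu launch above is asynchronous,
      // so without a device synchronization here the elapsed time printed below
      // mostly reflects kernel-launch overhead rather than the conversion itself.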
clock_t tick1 = clock();
time = (float)(tick1 - tick0) / CLOCKS_PER_SEC;
printf("%f ", time);
}
float time = 0;
clock_t tick0 = clock();
// compute top[g] = dot(weight[g], bottom[g])
// weight[g]: C' x (C * kernel_h * kernel_w)
// bottom[g]: (C * kernel_h * kernel_w) x (H' * W')
// top[g]: C' x H' x W'
for (int g = 0; g < num_groups; ++g) {
const int kernel_size = bottom_C * kernel_h * kernel_w;
const int top_area = top_H * top_W;
const real* const p_temp_g = p_temp_data +
g * kernel_size * top_area;
const real* const p_weight_g = weight5d->data +
g * top_C * kernel_size;
real* const p_top_g = p_top_item + g * top_C * top_area;
// compute Z = alpha * dot(X, Y) + beta * Z
// X (= weight): m x p, Y (= bottom): p x n, Z (= top): m x n
// X, Y, Z: row-major order (e.g., Z[i][j] = Z[i * n + j])
#ifdef GPU
// input arguments:
// cublas handle,
// do_transpose_Y (= false), do_transpose_X (= false),
// n (= H' * W'), m (= C'), p (= C * kernel_h * kernel_w),
// &alpha (= 1),
// &Y, number of columns in Y (= n),
// &X, number of columns in X (= p),
// &beta (= 0),
// &Z, number of columns in Z (= n)
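      //  (cuBLAS/hipBLAS use column-major storage, so the row-major product
      //   Z = X * Y is obtained by passing the operands in swapped order,
      //   Y first and then X, with the dimensions given as n, m, p.)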
const real one = 1.0f, zero = 0.0f;
hipblasSgemm(*((hipblasHandle_t*)option->handle),
HIPBLAS_OP_N, HIPBLAS_OP_N,
top_area, top_C, kernel_size,
&one,
p_temp_g, top_area,
p_weight_g, kernel_size,
&zero,
p_top_g, top_area);
#else
// input arguments:
// is_row_major_order (= true),
      //   do_transpose_X (= false), do_transpose_Y (= true),
      //     (the CPU convert_bottom_cpu lays bottom out as (H' * W') x (C * kernel_h * kernel_w), hence the transpose)
// m (= C'), n (= H' * W'), p (= C * kernel_h * kernel_w),
// alpha (= 1),
// &X, number of columns in X (= p),
      //   &Y, number of columns in Y as stored (= p),
// beta (= 0),
// &Z, number of columns in Z (= n)
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasTrans,
top_C, top_area, kernel_size,
1.0f,
p_weight_g, kernel_size,
p_temp_g, kernel_size,//top_area,
0.0f,
p_top_g, top_area);
#endif
}
// compute top[i][j] = top[i][j] + bias[i]
// top: (G * C') x (H' * W')
// bias: (G * C') x 1
if (option->bias) {
const int top_channels = num_groups * top_C;
const int top_area = top_H * top_W;
// the computation is equivalent to...
// top = top + dot(bias, constant)
// constant: 1 x (H' * W'), constant[i] = 1 for all i
#ifdef GPU
// thus, input arguments:
// do_transpose_Y (= false), do_transpose_X (= false),
// n = H' * W', m = G * C', p = 1
// alpha = 1, beta = 1
const real one = 1.0f;
hipblasSgemm(*((hipblasHandle_t*)option->handle),
HIPBLAS_OP_N, HIPBLAS_OP_N,
top_area, top_channels, 1,
&one,
const_data, top_area,
bias1d->data, 1,
&one,
p_top_item, top_area);
#else
// input arguments:
// do_transpose_X (= false), do_transpose_Y (= false),
// m = G * C', n = H' * W', p = 1
// alpha = 1, beta = 1
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_channels, top_area, 1,
1.0f,
bias1d->data, 1,
const_data, top_area,
1.0f,
p_top_item, top_area);
#endif
}
clock_t tick1 = clock();
time = (float)(tick1 - tick0) / CLOCKS_PER_SEC;
printf("%f\n", time);
// locate next item
{
const int bottom_size = num_groups * bottom_C * bottom_H * bottom_W;
const int top_size = num_groups * top_C * top_H * top_W;
//const int temp_size =
// num_groups * bottom_C * kernel_h * kernel_w * top_H * top_W;
p_bottom_item += bottom_size;
p_top_item += top_size;
//p_temp_data += temp_size;
}
} // endfor batch
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
void conv_shape(const Tensor* const bottom3d,
Tensor* const top3d,
Tensor* const weight5d,
Tensor* const bias1d,
int* const temp_size,
int* const const_size,
const LayerOption* const option)
{
const int num_groups = option->num_groups; // G
const int top_C = option->out_channels / option->num_groups; // C'
const int bottom_C = bottom3d->shape[0][0] / option->num_groups; // C
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// calculate shape for each item in the batch
int total_size = 0, total_top_area = 0, max_top_area = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
const int top_area = top_H * top_W;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// start position for n-th item in top3d->data
top3d->start[n] = total_size;
total_size += num_groups * top_C * top_H * top_W;
// sum(H' * W') & max(H' * W') in the batch
total_top_area += top_area;
max_top_area = MAX(max_top_area, top_area);
}
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
// weight shape: G x C' x C x kernel_h x kernel_w
weight5d->num_items = 1;
weight5d->ndim = 5;
weight5d->shape[0][0] = num_groups;
weight5d->shape[0][1] = top_C;
weight5d->shape[0][2] = bottom_C;
weight5d->shape[0][3] = kernel_h;
weight5d->shape[0][4] = kernel_w;
weight5d->start[0] = 0;
// bias shape: (G * C') x 1
if (option->bias) {
bias1d->num_items = 1;
bias1d->ndim = 1;
bias1d->shape[0][0] = num_groups * top_C;
bias1d->start[0] = 0;
}
else if (bias1d) {
bias1d->num_items = 0;
bias1d->ndim = 0;
bias1d->shape[0][0] = 0;
bias1d->start[0] = 0;
}
// temporary data size: G * C * kernel_h * kernel_w * sum(H' * W')
*temp_size = num_groups * bottom_C * kernel_h * kernel_w * max_top_area;
// constant data size: max(H' * W')
*const_size = max_top_area;
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_forward(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
net->temp_data, net->const_data, &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void shape_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
int temp_size, const_size;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_shape(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
&temp_size, &const_size, &layer->option);
update_net_size(net, layer, temp_size, 0, const_size);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
int main(int argc, char* argv[])
{
// variable declaration & memory allocation
Tensor X, Y, W, b;
real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
real *W_data = NULL, *b_data = NULL;
real *p_temp_data = NULL, *const_data = NULL, *p_const_data = NULL;
LayerOption option;
int temp_size, const_size;
// set option
{
option.num_groups = 1;
option.out_channels = 512;
option.kernel_h = 1;
option.kernel_w = 1;
option.pad_h = 0;
option.pad_w = 0;
option.stride_h = 1;
option.stride_w = 1;
option.bias = 1;
}
// load data
{
int ndim;
int shape[g_max_ndim];
int total_size;
X_data = load_data("../data/temp/conv_bottom0.bin",
&ndim, shape, NULL);
X.num_items = shape[0];
X.ndim = ndim - 1;
total_size = 0;
for (int n = 0; n < X.num_items; ++n) {
int size_n = 1;
for (int i = 0; i < X.ndim; ++i) {
X.shape[n][i] = shape[i + 1];
size_n *= shape[i + 1];
}
X.start[n] = total_size;
total_size += size_n;
}
conv_shape(&X, &Y, &W, &b, &temp_size, &const_size, &option);
Y_true_data = load_data("../data/temp/conv_top0.bin",
&ndim, shape, NULL);
Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
W_data = load_data("../data/temp/conv_param0.bin",
&ndim, shape, NULL);
if (option.bias) {
b_data = load_data("../data/temp/conv_param1.bin",
&ndim, shape, NULL);
const_data = (real*)malloc(const_size * sizeof(real));
for (int i = 0; i < const_size; ++i) {
const_data[i] = 1;
}
}
}
// CUDA initialization
#ifdef GPU
{
printf("set device\n");
hipSetDevice(0);
option.handle = (hipblasHandle_t*)malloc(sizeof(hipblasHandle_t));
if (hipblasCreate((hipblasHandle_t*)option.handle)
!= HIPBLAS_STATUS_SUCCESS) {
printf("cublas creation failed\n");
}
}
#endif
// bind loaded data to corresponding tensors
#ifdef GPU
{
const int X_size = flatten_size(&X);
const int Y_size = flatten_size(&Y);
const int W_size = flatten_size(&W);
const int b_size = flatten_size(&b);
printf("gpu malloc\n");
hipMalloc(&X.data, X_size * sizeof(real));
hipMalloc(&Y.data, Y_size * sizeof(real));
hipMalloc(&W.data, W_size * sizeof(real));
hipMalloc(&p_temp_data, temp_size * sizeof(real));
if (option.bias) {
hipMalloc(&b.data, b_size * sizeof(real));
hipMalloc(&p_const_data, const_size * sizeof(real));
}
else {
b.data = NULL;
}
printf("memcpy: cpu -> gpu\n");
hipMemcpyAsync(X.data, X_data, X_size * sizeof(real),
hipMemcpyHostToDevice);
hipMemcpyAsync(W.data, W_data, W_size * sizeof(real),
hipMemcpyHostToDevice);
if (option.bias) {
hipMemcpyAsync(b.data, b_data, b_size * sizeof(real),
hipMemcpyHostToDevice);
hipMemcpyAsync(p_const_data, const_data, const_size * sizeof(real),
hipMemcpyHostToDevice);
}
}
#else
{
X.data = X_data;
Y.data = Y_data;
W.data = W_data;
p_temp_data = (real*)malloc(temp_size * sizeof(real));
if (option.bias) {
b.data = b_data;
p_const_data = const_data;
}
else {
b.data = NULL;
}
}
#endif
// do forward operation
{
printf("do forward\n");
conv_forward(&X, &Y, &W, &b, p_temp_data, p_const_data, &option);
}
// copy GPU data to main memory
#ifdef GPU
{
const int Y_size = flatten_size(&Y);
printf("memcpy: cpu <- gpu\n");
hipMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
hipMemcpyDeviceToHost);
}
#endif
// verify results
{
int i = 0;
printf("verification\n");
for (int n = 0; n < Y.num_items; ++n) {
for (int c = 0; c < Y.shape[n][0]; ++c) {
for (int h = 0; h < Y.shape[n][1]; ++h) {
for (int w = 0; w < Y.shape[n][2]; ++w) {
real diff = ABS(Y_data[i] - Y_true_data[i]);
diff /= 1e-10f + MIN(ABS(Y_data[i]), ABS(Y_true_data[i]));
#ifdef GPU
if (diff > 0) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#else
if (diff > 1e-3f) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#endif
++i;
} // endfor w
} // endfor h
} // endfor c
} // endfor n
}
// memory deallocation
{
printf("free\n");
free(X_data);
free(Y_data);
free(Y_true_data);
free(W_data);
if (option.bias) {
free(b_data);
free(const_data);
}
}
#ifdef GPU
{
printf("gpu free\n");
hipFree(X.data);
hipFree(Y.data);
hipFree(W.data);
hipFree(p_temp_data);
if (option.bias) {
hipFree(b.data);
hipFree(p_const_data);
}
if (hipblasDestroy(*((hipblasHandle_t*)option.handle))
!= HIPBLAS_STATUS_SUCCESS) {
printf("cublas destruction failed\n");
}
free(option.handle);
}
#else
{
free(p_temp_data);
}
#endif
return 0;
}
#endif // endifdef TEST
| 304ddb7c859a1211b3cfe7924aab43972521a3b0.cu | #include "layer.h"
#include "boost/date_time/posix_time/posix_time.hpp"
// --------------------------------------------------------------------------
// kernel code
// convert_bottom_{gpu, cpu}
// --------------------------------------------------------------------------
// convert bottom3d (C x H x W)
// -> bottom5d (C x kernel_h x kernel_w x H5 x W5)
// given (c, h5, w5),
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = (-pad_h + stride_h * h5) + kh, kh = { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w5) + kw, kw = { 0, 1, ..., kernel_w - 1 }
// if !(0 <= h < H) or !(0 <= w < W), assign 0
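// Worked example (assumed sizes, for illustration only): kernel 3x3, pad 1,
// stride 1, H = W = 4 gives H5 = W5 = 1 + (4 + 2 - 3) / 1 = 4. For
// (h5, w5) = (0, 0): h = kh - 1 and w = kw - 1, so (kh, kw) = (0, 0) falls in
// the zero-padded region while (kh, kw) = (1, 1) reads bottom3d[c][0][0].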
#ifndef GPU
void convert_bottom_cpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
const int kernel_size = kernel_h * kernel_w;
const int HW = H * W;
real* p_bottom5d_hwc = bottom5d;
//memset(p_bottom5d_hwc, 0, sizeof(real) * kernel_size * H5 * W5 * C);
for (int h5 = 0; h5 < H5; ++h5) {
const int h_start = h5 * stride_h - pad_h;
//const int kh_start = MAX(0, -h_start);
//const int kh_end = MIN(kernel_h, H - h_start);
for (int w5 = 0; w5 < W5; ++w5) {
const int w_start = w5 * stride_w - pad_w;
//const int kw_start = MAX(0, -w_start);
//const int kw_end = MIN(kernel_w, W - w_start);
const real* const p_bottom3d_hw = bottom3d + h_start * W + w_start;
for (int c = 0; c < C; ++c) {
// (h_start, w_start): upper-left corner of bottom3d's kernel patch
const real* const p_bottom3d = p_bottom3d_hw + c * HW;
real* const p_bottom5d = p_bottom5d_hwc;
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = h_start + kh, kh = {0, 1, ..., kernel_h - 1}
// w = w_start + kw, kw = {0, 1, ..., kernel_w - 1}
// if (h, w) is in a zero-padded region, assign 0
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h = h_start + kh;
const int w = w_start + kw;
p_bottom5d[(kh * kernel_w + kw)] =
(h >= 0 && h < H && w >= 0 && w < W) ? p_bottom3d[kh * W + kw] : 0;
}
}
/*
for (int kh = kh_start; kh < kh_end; ++kh) {
for (int kw = kw_start; kw < kw_end; ++kw) {
p_bottom5d[kh * kernel_w + kw] = p_bottom3d[kh * W + kw];
}
}
*/
p_bottom5d_hwc += kernel_size;
} // endfor c
} // endfor w5
} // endfor h5
}
#else
void convert_bottom_cpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
for (int c = 0; c < C; ++c) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
// pointer to bottom5d[c][kh][kw][h5 = 0][w5 = 0]
real* const p_bottom5d = bottom5d +
((c * kernel_h + kh) * kernel_w + kw) * H5 * W5;
int h = -pad_h + kh;
int h5 = 0;
// for h < 0 (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h < 0; h += stride_h, ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for 0 <= h < H (data region)
for (; h < H && h5 < H5; h += stride_h, ++h5) {
// pointer to bottom3d[c][h][w = 0]
int w = -pad_w + kw;
int w5 = 0;
// for w < 0 (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w < 0; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
// for 0 <= w < W (data region):
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
for (; w < W && w5 < W5; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = bottom3d[(c * H + h) * W + w];
}
// for w >= W (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for h >= H (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h5 < H5; ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
} // endfor kw
} // endfor kh
} // endfor c
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// conv_forward
// --------------------------------------------------------------------------
// convolution: bottom -> top
// G: number of groups
// bottom: (G * C) x H x W
// top: (G * C') x H' x W'
// weight: G x C' x C x kernel_h x kernel_w
// bias: (G * C') x 1
// temp: (G * C * kernel_h * kernel_w) x (H' * W') array
// const: 1 x (H' * W') array, const[i] = 1 for all i
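// Example with assumed sizes (illustration only): G = 1, C = 256, C' = 512,
// 3x3 kernel, H = W = 14, pad = 1, stride = 1  gives  H' = W' = 14, so temp is
// (256 * 9) x 196, the per-group weight acts as 512 x (256 * 9), and top is
// 512 x 14 x 14.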
void conv_forward(const Tensor* const bottom3d,
Tensor* const top3d,
const Tensor* const weight5d,
const Tensor* const bias1d,
real* const temp_data,
const real* const const_data,
const LayerOption* const option)
{
// weight shape: G x C' x C x kernel_h x kernel_w
const int num_groups = weight5d->shape[0][0]; // G
const int top_C = weight5d->shape[0][1]; // C'
const int bottom_C = weight5d->shape[0][2]; // C
const int kernel_h = weight5d->shape[0][3];
const int kernel_w = weight5d->shape[0][4];
// padding size & stride size
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
real* p_top_item = top3d->data;
real* p_temp_data = temp_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// set top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// convert bottom shape
// (G * C) x H x W -> (G * C * kernel_h * kernel_w) x (H' * W')
{
float time = 0;
clock_t tick0 = clock();
#ifdef GPU
// one thread computes "kernel_h * kernel_w" entries in top
const int num_threads = num_groups * bottom_C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
convert_bottom_gpu<<<num_blocks, threads_per_block>>>(
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#else
convert_bottom_cpu(
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#endif
clock_t tick1 = clock();
time = (float)(tick1 - tick0) / CLOCKS_PER_SEC;
printf("%f ", time);
}
float time = 0;
clock_t tick0 = clock();
// compute top[g] = dot(weight[g], bottom[g])
// weight[g]: C' x (C * kernel_h * kernel_w)
// bottom[g]: (C * kernel_h * kernel_w) x (H' * W')
// top[g]: C' x H' x W'
for (int g = 0; g < num_groups; ++g) {
const int kernel_size = bottom_C * kernel_h * kernel_w;
const int top_area = top_H * top_W;
const real* const p_temp_g = p_temp_data +
g * kernel_size * top_area;
const real* const p_weight_g = weight5d->data +
g * top_C * kernel_size;
real* const p_top_g = p_top_item + g * top_C * top_area;
// compute Z = alpha * dot(X, Y) + beta * Z
// X (= weight): m x p, Y (= bottom): p x n, Z (= top): m x n
// X, Y, Z: row-major order (e.g., Z[i][j] = Z[i * n + j])
#ifdef GPU
// input arguments:
// cublas handle,
// do_transpose_Y (= false), do_transpose_X (= false),
// n (= H' * W'), m (= C'), p (= C * kernel_h * kernel_w),
// &alpha (= 1),
// &Y, number of columns in Y (= n),
// &X, number of columns in X (= p),
// &beta (= 0),
// &Z, number of columns in Z (= n)
const real one = 1.0f, zero = 0.0f;
cublasSgemm(*((cublasHandle_t*)option->handle),
CUBLAS_OP_N, CUBLAS_OP_N,
top_area, top_C, kernel_size,
&one,
p_temp_g, top_area,
p_weight_g, kernel_size,
&zero,
p_top_g, top_area);
#else
// input arguments:
// is_row_major_order (= true),
      //   do_transpose_X (= false), do_transpose_Y (= true),
      //     (the CPU convert_bottom_cpu lays bottom out as (H' * W') x (C * kernel_h * kernel_w), hence the transpose)
// m (= C'), n (= H' * W'), p (= C * kernel_h * kernel_w),
// alpha (= 1),
// &X, number of columns in X (= p),
      //   &Y, number of columns in Y as stored (= p),
// beta (= 0),
// &Z, number of columns in Z (= n)
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasTrans,
top_C, top_area, kernel_size,
1.0f,
p_weight_g, kernel_size,
p_temp_g, kernel_size,//top_area,
0.0f,
p_top_g, top_area);
#endif
}
// compute top[i][j] = top[i][j] + bias[i]
// top: (G * C') x (H' * W')
// bias: (G * C') x 1
if (option->bias) {
const int top_channels = num_groups * top_C;
const int top_area = top_H * top_W;
// the computation is equivalent to...
// top = top + dot(bias, constant)
// constant: 1 x (H' * W'), constant[i] = 1 for all i
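      // Implementation note: this is a rank-1 update: multiplying the
      // (G * C') x 1 bias by the 1 x (H' * W') all-ones vector broadcasts each
      // bias value across its channel's spatial positions.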
#ifdef GPU
// thus, input arguments:
// do_transpose_Y (= false), do_transpose_X (= false),
// n = H' * W', m = G * C', p = 1
// alpha = 1, beta = 1
const real one = 1.0f;
cublasSgemm(*((cublasHandle_t*)option->handle),
CUBLAS_OP_N, CUBLAS_OP_N,
top_area, top_channels, 1,
&one,
const_data, top_area,
bias1d->data, 1,
&one,
p_top_item, top_area);
#else
// input arguments:
// do_transpose_X (= false), do_transpose_Y (= false),
// m = G * C', n = H' * W', p = 1
// alpha = 1, beta = 1
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_channels, top_area, 1,
1.0f,
bias1d->data, 1,
const_data, top_area,
1.0f,
p_top_item, top_area);
#endif
}
clock_t tick1 = clock();
time = (float)(tick1 - tick0) / CLOCKS_PER_SEC;
printf("%f\n", time);
// locate next item
{
const int bottom_size = num_groups * bottom_C * bottom_H * bottom_W;
const int top_size = num_groups * top_C * top_H * top_W;
//const int temp_size =
// num_groups * bottom_C * kernel_h * kernel_w * top_H * top_W;
p_bottom_item += bottom_size;
p_top_item += top_size;
//p_temp_data += temp_size;
}
} // endfor batch
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
void conv_shape(const Tensor* const bottom3d,
Tensor* const top3d,
Tensor* const weight5d,
Tensor* const bias1d,
int* const temp_size,
int* const const_size,
const LayerOption* const option)
{
const int num_groups = option->num_groups; // G
const int top_C = option->out_channels / option->num_groups; // C'
const int bottom_C = bottom3d->shape[0][0] / option->num_groups; // C
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// calculate shape for each item in the batch
int total_size = 0, total_top_area = 0, max_top_area = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
const int top_area = top_H * top_W;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// start position for n-th item in top3d->data
top3d->start[n] = total_size;
total_size += num_groups * top_C * top_H * top_W;
// sum(H' * W') & max(H' * W') in the batch
total_top_area += top_area;
max_top_area = MAX(max_top_area, top_area);
}
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
// weight shape: G x C' x C x kernel_h x kernel_w
weight5d->num_items = 1;
weight5d->ndim = 5;
weight5d->shape[0][0] = num_groups;
weight5d->shape[0][1] = top_C;
weight5d->shape[0][2] = bottom_C;
weight5d->shape[0][3] = kernel_h;
weight5d->shape[0][4] = kernel_w;
weight5d->start[0] = 0;
// bias shape: (G * C') x 1
if (option->bias) {
bias1d->num_items = 1;
bias1d->ndim = 1;
bias1d->shape[0][0] = num_groups * top_C;
bias1d->start[0] = 0;
}
else if (bias1d) {
bias1d->num_items = 0;
bias1d->ndim = 0;
bias1d->shape[0][0] = 0;
bias1d->start[0] = 0;
}
// temporary data size: G * C * kernel_h * kernel_w * sum(H' * W')
*temp_size = num_groups * bottom_C * kernel_h * kernel_w * max_top_area;
// constant data size: max(H' * W')
*const_size = max_top_area;
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_forward(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
net->temp_data, net->const_data, &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void shape_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
int temp_size, const_size;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_shape(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
&temp_size, &const_size, &layer->option);
update_net_size(net, layer, temp_size, 0, const_size);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
int main(int argc, char* argv[])
{
// variable declaration & memory allocation
Tensor X, Y, W, b;
real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
real *W_data = NULL, *b_data = NULL;
real *p_temp_data = NULL, *const_data = NULL, *p_const_data = NULL;
LayerOption option;
int temp_size, const_size;
// set option
{
option.num_groups = 1;
option.out_channels = 512;
option.kernel_h = 1;
option.kernel_w = 1;
option.pad_h = 0;
option.pad_w = 0;
option.stride_h = 1;
option.stride_w = 1;
option.bias = 1;
}
// load data
{
int ndim;
int shape[g_max_ndim];
int total_size;
X_data = load_data("../data/temp/conv_bottom0.bin",
&ndim, shape, NULL);
X.num_items = shape[0];
X.ndim = ndim - 1;
total_size = 0;
for (int n = 0; n < X.num_items; ++n) {
int size_n = 1;
for (int i = 0; i < X.ndim; ++i) {
X.shape[n][i] = shape[i + 1];
size_n *= shape[i + 1];
}
X.start[n] = total_size;
total_size += size_n;
}
conv_shape(&X, &Y, &W, &b, &temp_size, &const_size, &option);
Y_true_data = load_data("../data/temp/conv_top0.bin",
&ndim, shape, NULL);
Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
W_data = load_data("../data/temp/conv_param0.bin",
&ndim, shape, NULL);
if (option.bias) {
b_data = load_data("../data/temp/conv_param1.bin",
&ndim, shape, NULL);
const_data = (real*)malloc(const_size * sizeof(real));
for (int i = 0; i < const_size; ++i) {
const_data[i] = 1;
}
}
}
// CUDA initialization
#ifdef GPU
{
printf("set device\n");
cudaSetDevice(0);
option.handle = (cublasHandle_t*)malloc(sizeof(cublasHandle_t));
if (cublasCreate((cublasHandle_t*)option.handle)
!= CUBLAS_STATUS_SUCCESS) {
printf("cublas creation failed\n");
}
}
#endif
// bind loaded data to corresponding tensors
#ifdef GPU
{
const int X_size = flatten_size(&X);
const int Y_size = flatten_size(&Y);
const int W_size = flatten_size(&W);
const int b_size = flatten_size(&b);
printf("gpu malloc\n");
cudaMalloc(&X.data, X_size * sizeof(real));
cudaMalloc(&Y.data, Y_size * sizeof(real));
cudaMalloc(&W.data, W_size * sizeof(real));
cudaMalloc(&p_temp_data, temp_size * sizeof(real));
if (option.bias) {
cudaMalloc(&b.data, b_size * sizeof(real));
cudaMalloc(&p_const_data, const_size * sizeof(real));
}
else {
b.data = NULL;
}
printf("memcpy: cpu -> gpu\n");
cudaMemcpyAsync(X.data, X_data, X_size * sizeof(real),
cudaMemcpyHostToDevice);
cudaMemcpyAsync(W.data, W_data, W_size * sizeof(real),
cudaMemcpyHostToDevice);
if (option.bias) {
cudaMemcpyAsync(b.data, b_data, b_size * sizeof(real),
cudaMemcpyHostToDevice);
cudaMemcpyAsync(p_const_data, const_data, const_size * sizeof(real),
cudaMemcpyHostToDevice);
}
}
#else
{
X.data = X_data;
Y.data = Y_data;
W.data = W_data;
p_temp_data = (real*)malloc(temp_size * sizeof(real));
if (option.bias) {
b.data = b_data;
p_const_data = const_data;
}
else {
b.data = NULL;
}
}
#endif
// do forward operation
{
printf("do forward\n");
conv_forward(&X, &Y, &W, &b, p_temp_data, p_const_data, &option);
}
// copy GPU data to main memory
#ifdef GPU
{
const int Y_size = flatten_size(&Y);
printf("memcpy: cpu <- gpu\n");
cudaMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
cudaMemcpyDeviceToHost);
}
#endif
// verify results
{
int i = 0;
printf("verification\n");
for (int n = 0; n < Y.num_items; ++n) {
for (int c = 0; c < Y.shape[n][0]; ++c) {
for (int h = 0; h < Y.shape[n][1]; ++h) {
for (int w = 0; w < Y.shape[n][2]; ++w) {
real diff = ABS(Y_data[i] - Y_true_data[i]);
diff /= 1e-10f + MIN(ABS(Y_data[i]), ABS(Y_true_data[i]));
#ifdef GPU
if (diff > 0) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#else
if (diff > 1e-3f) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#endif
++i;
} // endfor w
} // endfor h
} // endfor c
} // endfor n
}
// memory deallocation
{
printf("free\n");
free(X_data);
free(Y_data);
free(Y_true_data);
free(W_data);
if (option.bias) {
free(b_data);
free(const_data);
}
}
#ifdef GPU
{
printf("gpu free\n");
cudaFree(X.data);
cudaFree(Y.data);
cudaFree(W.data);
cudaFree(p_temp_data);
if (option.bias) {
cudaFree(b.data);
cudaFree(p_const_data);
}
if (cublasDestroy(*((cublasHandle_t*)option.handle))
!= CUBLAS_STATUS_SUCCESS) {
printf("cublas destruction failed\n");
}
free(option.handle);
}
#else
{
free(p_temp_data);
}
#endif
return 0;
}
#endif // endifdef TEST
|
1224aa6def40500553310556dd423d669b53b6b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#include<string.h>
#include<ctime>
#define BLOCK_NUM 4 // number of blocks
#define THREAD_NUM 2 // threads per block
#define R_SIZE (BLOCK_NUM * THREAD_NUM) // rows/columns of the matrix
#define M_SIZE (R_SIZE * R_SIZE) // total matrix size
__global__ void mat_mul(int* mat1, int* mat2, int* result)
{
	const int bid = blockIdx.x; // block id
	const int tid = threadIdx.x; // thread id
	// each thread computes one row
	const int row = bid * THREAD_NUM + tid; // row handled by this thread
for (int c = 0; c < R_SIZE; c++)
{
for (int n = 0; n < R_SIZE; n++)
{
result[row * R_SIZE + c] += mat1[row * R_SIZE + n] * mat2[n * R_SIZE + c];
}
}
}
int main(int argc, char* argv[])
{
int* mat1, *mat2, *result;
int* g_mat1, *g_mat2, *g_mat_result;
double time_pc, time_normal;
clock_t startTime, endTime;
	// the 2D matrices are stored as flat 1D arrays
mat1 = (int*)malloc(M_SIZE * sizeof(int));
mat2 = (int*)malloc(M_SIZE * sizeof(int));
result = (int*)malloc(M_SIZE * sizeof(int));
// initialize
for (int i = 0; i < M_SIZE; i++)
{
mat1[i] = rand() % 10;
mat2[i] = rand() % 10;
result[i] = 0;
}
	printf("Matrix 1:\n");
for (int i = 0; i < M_SIZE; i++)
if((i + 1) % R_SIZE == 0)
printf("%d\n", mat1[i]);
else
printf("%d ", mat1[i]);
	printf("\nMatrix 2:\n");
for (int i = 0; i < M_SIZE; i++)
if ((i + 1) % R_SIZE == 0)
printf("%d\n", mat2[i]);
else
printf("%d ", mat2[i]);
hipMalloc((void**)&g_mat1, sizeof(int) * M_SIZE);
hipMalloc((void**)&g_mat2, sizeof(int) * M_SIZE);
hipMalloc((void**)&g_mat_result, sizeof(int) * M_SIZE);
hipMemcpy(g_mat1, mat1, sizeof(int) * M_SIZE, hipMemcpyHostToDevice);
hipMemcpy(g_mat2, mat2, sizeof(int) * M_SIZE, hipMemcpyHostToDevice);
	/* parallel version */
	startTime = clock();// start timing
	hipLaunchKernelGGL(( mat_mul) , dim3(BLOCK_NUM), dim3(THREAD_NUM) , 0, 0, g_mat1, g_mat2, g_mat_result);
	hipMemcpy(result, g_mat_result, sizeof(int) * M_SIZE, hipMemcpyDeviceToHost);
	endTime = clock();// stop timing
	time_pc = (double)(endTime - startTime) / CLOCKS_PER_SEC;
	printf("Parallel time: %lf s\n", time_pc);
	/* serial version */
	memset(result, 0, sizeof(int) * M_SIZE); // reset so the serial result does not accumulate on top of the GPU result
	startTime = clock();// start timing
	for (int r = 0; r < R_SIZE; r++) {
		for (int c = 0; c < R_SIZE; c++) {
			for (int n = 0; n < R_SIZE; n++) {
				result[r * R_SIZE + c] += mat1[r * R_SIZE + n] * mat2[n * R_SIZE + c];
			}
		}
	}
	endTime = clock();// stop timing
	time_normal = (double)(endTime - startTime) / CLOCKS_PER_SEC;
	printf("Serial time: %lf s\n", time_normal);
	printf("Speedup: %lf\n", time_normal / time_pc);
	printf("\nProduct of the two matrices:\n");
for (int i = 0; i < M_SIZE; i++)
if ((i + 1) % R_SIZE == 0)
printf("%d\n\n", result[i]);
else
printf("%d ", result[i]);
} | 1224aa6def40500553310556dd423d669b53b6b2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#include<string.h>
#include<ctime>
#define BLOCK_NUM 4 // number of blocks
#define THREAD_NUM 2 // threads per block
#define R_SIZE (BLOCK_NUM * THREAD_NUM) // rows/columns of the matrix
#define M_SIZE (R_SIZE * R_SIZE) // total matrix size
__global__ void mat_mul(int* mat1, int* mat2, int* result)
{
	const int bid = blockIdx.x; // block id
	const int tid = threadIdx.x; // thread id
	// each thread computes one row
	const int row = bid * THREAD_NUM + tid; // row handled by this thread
for (int c = 0; c < R_SIZE; c++)
{
for (int n = 0; n < R_SIZE; n++)
{
result[row * R_SIZE + c] += mat1[row * R_SIZE + n] * mat2[n * R_SIZE + c];
}
}
}
int main(int argc, char* argv[])
{
int* mat1, *mat2, *result;
int* g_mat1, *g_mat2, *g_mat_result;
double time_pc, time_normal;
clock_t startTime, endTime;
	// the 2D matrices are stored as flat 1D arrays
mat1 = (int*)malloc(M_SIZE * sizeof(int));
mat2 = (int*)malloc(M_SIZE * sizeof(int));
result = (int*)malloc(M_SIZE * sizeof(int));
// initialize
for (int i = 0; i < M_SIZE; i++)
{
mat1[i] = rand() % 10;
mat2[i] = rand() % 10;
result[i] = 0;
}
	printf("Matrix 1:\n");
for (int i = 0; i < M_SIZE; i++)
if((i + 1) % R_SIZE == 0)
printf("%d\n", mat1[i]);
else
printf("%d ", mat1[i]);
	printf("\nMatrix 2:\n");
for (int i = 0; i < M_SIZE; i++)
if ((i + 1) % R_SIZE == 0)
printf("%d\n", mat2[i]);
else
printf("%d ", mat2[i]);
cudaMalloc((void**)&g_mat1, sizeof(int) * M_SIZE);
cudaMalloc((void**)&g_mat2, sizeof(int) * M_SIZE);
cudaMalloc((void**)&g_mat_result, sizeof(int) * M_SIZE);
cudaMemcpy(g_mat1, mat1, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(g_mat2, mat2, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
	/* parallel version */
	startTime = clock();// start timing
	mat_mul <<<BLOCK_NUM, THREAD_NUM >>> (g_mat1, g_mat2, g_mat_result);
	cudaMemcpy(result, g_mat_result, sizeof(int) * M_SIZE, cudaMemcpyDeviceToHost);
	endTime = clock();// stop timing
	time_pc = (double)(endTime - startTime) / CLOCKS_PER_SEC;
	printf("Parallel time: %lf s\n", time_pc);
	/* serial version */
	memset(result, 0, sizeof(int) * M_SIZE); // reset so the serial result does not accumulate on top of the GPU result
	startTime = clock();// start timing
	for (int r = 0; r < R_SIZE; r++) {
		for (int c = 0; c < R_SIZE; c++) {
			for (int n = 0; n < R_SIZE; n++) {
				result[r * R_SIZE + c] += mat1[r * R_SIZE + n] * mat2[n * R_SIZE + c];
			}
		}
	}
	endTime = clock();// stop timing
	time_normal = (double)(endTime - startTime) / CLOCKS_PER_SEC;
	printf("Serial time: %lf s\n", time_normal);
	printf("Speedup: %lf\n", time_normal / time_pc);
	printf("\nProduct of the two matrices:\n");
for (int i = 0; i < M_SIZE; i++)
if ((i + 1) % R_SIZE == 0)
printf("%d\n\n", result[i]);
else
printf("%d ", result[i]);
} |
858b4df734bc70db963a294c1a4a52f57ee5c612.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ double get_collective_dist(int *dist, int rows, int cols, int col) {
double sum = 0;
for (int i = 0; i < rows; i++) {
if (dist[i * cols + col] == 0) {
return 0;
}
sum += (1 / (double)dist[i * cols + col]);
}
return sum;
}
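// collective_dist_kernel writes one value per column: the sum of reciprocal
// distances 1/dist[i][col] down that column, or 0 if any entry in the column
// is 0 (see get_collective_dist above). The grid-stride loop lets any launch
// configuration cover all columns.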
__global__ void collective_dist_kernel(int *dist, int rows, int cols, double *col_dist)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < cols) {
col_dist[tid] = get_collective_dist(dist, rows, cols, tid);
tid += blockDim.x * gridDim.x;
}
} | 858b4df734bc70db963a294c1a4a52f57ee5c612.cu | #include "includes.h"
__device__ double get_collective_dist(int *dist, int rows, int cols, int col) {
double sum = 0;
for (int i = 0; i < rows; i++) {
if (dist[i * cols + col] == 0) {
return 0;
}
sum += (1 / (double)dist[i * cols + col]);
}
return sum;
}
__global__ void collective_dist_kernel(int *dist, int rows, int cols, double *col_dist)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < cols) {
col_dist[tid] = get_collective_dist(dist, rows, cols, tid);
tid += blockDim.x * gridDim.x;
}
} |
431df291a8ac7cf4656be22c3f24677a5fa16e34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_rcbrtf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
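// iXSIZE/iYSIZE are rounded up to multiples of the block dimensions so the
// grid fully covers the matrix. Note that the timed loop below only enqueues
// kernel launches (it does not synchronize), so the reported time largely
// reflects launch overhead.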
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_rcbrtf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_rcbrtf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_rcbrtf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 431df291a8ac7cf4656be22c3f24677a5fa16e34.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_rcbrtf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_rcbrtf<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_rcbrtf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_rcbrtf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3312cdb6500476a855d986f8e335f8982ce22a09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <vector>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/gru_unit_op.h"
namespace caffe2 {
namespace detail {
template <typename Dtype>
__device__ Dtype cuda_sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
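// GRUUnitKernel applies the fused per-element GRU state update for timestep t:
// with u = sigmoid(update) and o = tanh(output),
//   H[n][d] = H_prev[n][d] * u + (1 - u) * o.
// X packs the three gate pre-activations contiguously per example as
// [reset | update | output], three blocks of dim values each. The reset gate
// is expected to have been applied upstream, so it is unused here and its
// gradient in GRUUnitGradientKernel below is zero. Positions past a sequence's
// length either carry H_prev forward or are zeroed when drop_states is set.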
template <typename T>
__global__ void GRUUnitKernel(
const int ND,
const int dim,
const int t,
const T* H_prev,
const T* X,
const int32_t* seqLengths,
bool drop_states,
T* H) {
// index is virtual thread ID in range [0, ND)
CUDA_1D_KERNEL_LOOP(index, ND) {
const int n = index / dim;
const int d = index % dim;
const bool valid = seqLengths == nullptr || t < seqLengths[n];
if (!valid) {
H[index] = H_prev[index] * !drop_states;
} else {
const T* X_offset = X + 3 * dim * n;
const T update = X_offset[1 * dim + d];
const T output = X_offset[2 * dim + d];
T sigmoid_update = cuda_sigmoid(update);
H[index] = H_prev[index] * sigmoid_update +
tanh(output) * (1.0f - sigmoid_update);
}
}
}
template <typename T>
__global__ void GRUUnitGradientKernel(
const int ND,
const int dim,
const int t,
const T* H_prev,
const T* X,
const int32_t* seqLengths,
const T* H,
const T* H_diff,
bool drop_states,
T* H_prev_diff,
T* X_diff) {
CUDA_1D_KERNEL_LOOP(index, ND) {
const int n = index / dim;
const bool valid = seqLengths == nullptr || t < seqLengths[n];
const int d = index % dim;
const T* X_offset = X + 3 * dim * n;
T* h_prev_diff = H_prev_diff + index;
T* X_diff_offset = X_diff + 3 * dim * n;
T* reset_diff = X_diff_offset + 0 * dim + d;
T* update_diff = X_diff_offset + 1 * dim + d;
T* output_diff = X_diff_offset + 2 * dim + d;
if (!valid) {
*h_prev_diff = H_diff[index] * !drop_states;
*reset_diff = 0;
*update_diff = 0;
*output_diff = 0;
} else {
const T u = cuda_sigmoid(X_offset[1 * dim + d]);
const T o = tanh(X_offset[2 * dim + d]);
*h_prev_diff = H_diff[index] * u;
*reset_diff = 0; // 0 contribution to gradient from this operation
*update_diff =
(H_diff[index] * H_prev[index] - H_diff[index] * o) * u * (1.0f - u);
*output_diff = H_diff[index] * (1.0f - u) * (1.0f - o * o);
}
}
}
template <>
void GRUUnit<float, CUDAContext>(
int N,
int D,
int t,
const float* H_prev,
const float* X,
const int32_t* seqLengths,
bool drop_states,
float* H,
CUDAContext* context) {
hipLaunchKernelGGL(( GRUUnitKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
N * D, D, t, H_prev, X, seqLengths, drop_states, H);
}
template <>
void GRUUnitGradient<float, CUDAContext>(
int N,
int D,
int t,
const float* H_prev,
const float* X,
const int32_t* seqLengths,
const float* H,
const float* H_diff,
bool drop_states,
float* H_prev_diff,
float* X_diff,
CUDAContext* context) {
hipLaunchKernelGGL(( GRUUnitGradientKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
N * D,
D,
t,
H_prev,
X,
seqLengths,
H,
H_diff,
drop_states,
H_prev_diff,
X_diff);
}
}
REGISTER_CUDA_OPERATOR(GRUUnit, GRUUnitOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GRUUnitGradient, GRUUnitGradientOp<float, CUDAContext>);
}
| 3312cdb6500476a855d986f8e335f8982ce22a09.cu | #include <algorithm>
#include <cmath>
#include <vector>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/gru_unit_op.h"
namespace caffe2 {
namespace detail {
template <typename Dtype>
__device__ Dtype cuda_sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename T>
__global__ void GRUUnitKernel(
const int ND,
const int dim,
const int t,
const T* H_prev,
const T* X,
const int32_t* seqLengths,
bool drop_states,
T* H) {
// index is virtual thread ID in range [0, ND)
CUDA_1D_KERNEL_LOOP(index, ND) {
const int n = index / dim;
const int d = index % dim;
const bool valid = seqLengths == nullptr || t < seqLengths[n];
if (!valid) {
H[index] = H_prev[index] * !drop_states;
} else {
const T* X_offset = X + 3 * dim * n;
const T update = X_offset[1 * dim + d];
const T output = X_offset[2 * dim + d];
T sigmoid_update = cuda_sigmoid(update);
H[index] = H_prev[index] * sigmoid_update +
tanh(output) * (1.0f - sigmoid_update);
}
}
}
template <typename T>
__global__ void GRUUnitGradientKernel(
const int ND,
const int dim,
const int t,
const T* H_prev,
const T* X,
const int32_t* seqLengths,
const T* H,
const T* H_diff,
bool drop_states,
T* H_prev_diff,
T* X_diff) {
CUDA_1D_KERNEL_LOOP(index, ND) {
const int n = index / dim;
const bool valid = seqLengths == nullptr || t < seqLengths[n];
const int d = index % dim;
const T* X_offset = X + 3 * dim * n;
T* h_prev_diff = H_prev_diff + index;
T* X_diff_offset = X_diff + 3 * dim * n;
T* reset_diff = X_diff_offset + 0 * dim + d;
T* update_diff = X_diff_offset + 1 * dim + d;
T* output_diff = X_diff_offset + 2 * dim + d;
if (!valid) {
*h_prev_diff = H_diff[index] * !drop_states;
*reset_diff = 0;
*update_diff = 0;
*output_diff = 0;
} else {
const T u = cuda_sigmoid(X_offset[1 * dim + d]);
const T o = tanh(X_offset[2 * dim + d]);
*h_prev_diff = H_diff[index] * u;
*reset_diff = 0; // 0 contribution to gradient from this operation
*update_diff =
(H_diff[index] * H_prev[index] - H_diff[index] * o) * u * (1.0f - u);
*output_diff = H_diff[index] * (1.0f - u) * (1.0f - o * o);
}
}
}
template <>
void GRUUnit<float, CUDAContext>(
int N,
int D,
int t,
const float* H_prev,
const float* X,
const int32_t* seqLengths,
bool drop_states,
float* H,
CUDAContext* context) {
GRUUnitKernel<float>
<<<CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
N * D, D, t, H_prev, X, seqLengths, drop_states, H);
}
template <>
void GRUUnitGradient<float, CUDAContext>(
int N,
int D,
int t,
const float* H_prev,
const float* X,
const int32_t* seqLengths,
const float* H,
const float* H_diff,
bool drop_states,
float* H_prev_diff,
float* X_diff,
CUDAContext* context) {
GRUUnitGradientKernel<float>
<<<CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
N * D,
D,
t,
H_prev,
X,
seqLengths,
H,
H_diff,
drop_states,
H_prev_diff,
X_diff);
}
}
REGISTER_CUDA_OPERATOR(GRUUnit, GRUUnitOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GRUUnitGradient, GRUUnitGradientOp<float, CUDAContext>);
}
|
df4eac40d69b9b4a9dcc89b9859eb6cd4ae64506.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Inverse Discrete Cosine Transform, column-wise (DCT type III)
* DCT_III_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_III_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DEFAULT_DIM 32
#define DELTA(i, j) ((i==j)?1:0)
const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DCTIII_Column_Inverse_Kernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
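// Note: DCTIII_Column_Inverse_Kernel below has an identical body; the two
// variants differ only in the const qualification of their pointer arguments
// (this one serves the gpuArray input path, the other the host-array path via
// CalculateTransform).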
__global__ void DCTIII_Column_Inverse_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
hipMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTIII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost);
C = hostC;
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if ((nrhs!=1)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
 printf("Attention: the input is a row vector; please use the row-wise Inverse Discrete Cosine Transform instead.\n");
return;
}
// numDCOSRows=numDCOSColumns = numARows;
// numCRows = numARows;
//
// numCColumns = numAColumns;
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
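 // Build the cosine matrix used for the transform: with N = numDCOSColumns,
 // entry (i, j) = cos((2*j + 1) * i * PI_d / (2*N)) scaled by sqrt(2/N),
 // except row 0 which is scaled by sqrt(1/N) (the DELTA term). The transform
 // is then applied column-wise as a dense matrix product with the input.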
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i <numDCOSRows ; i++){
// for (int j = 0; j < numDCOSColumns; j++){
// //hostB[i * numDCOSColumns+ j] = i + j* numAColumns;
// //hostB[i * numDCOSColumns + j] = 1;
// //cosvalx[i * numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / numBColumns);
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
// if (i == 0) {
// pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / numDCOSColumns);
// //pointer[i + j* numDCOSColumns] = 1;
// }
// else if (i != 0) {
// pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(2.0 / numDCOSColumns);
// //pointer[i + j* numDCOSColumns] = 2;
// }
// }
// }
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DCTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
//hipDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
if (numARows==1)
{
 printf("Attention: the input is a row vector; please use the row-wise Inverse Discrete Cosine Transform instead.\n");
return;
}
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
//Inverse Discrete Cosine Transform in Columns wise
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i < numBRows; i++){
// for (int j = 0; j < numBColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// if (j == 0) {
// hostB[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*PI_d*j)*sqrt(1.0 / numBColumns);
// }
// else if (j != 0) {
// hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(2.0 / numBColumns);
// }
//
// }
// }
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
}
| df4eac40d69b9b4a9dcc89b9859eb6cd4ae64506.cu | /*
* Inverse Discrete Cosine Transform in Column wise (DCT three)
* DCT_III_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_III_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
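 *
 * Example call from MATLAB (function name taken from the description above;
 * this is an assumption — use whatever name the MEX file is actually compiled under):
 *   A = gpuArray(rand(256, 64));     % or a plain double matrix on the CPU
 *   B = DCT_III_Column_Inverse(A);   % inverse DCT applied down each column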
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DEFAULT_DIM 32
#define DELTA(i, j) ((i==j)?1:0)
const double PI_d = 3.141592653589793238462643383279502884; //pi
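// Note: both kernels below compute a dense product C = A * B with one thread per
// output element; the k/n loops walk the shared inner dimension in DEFAULT_DIM-sized
// chunks, but no shared-memory tiling is performed.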
__global__ void DCTIII_Column_Inverse_Kernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DCTIII_Column_Inverse_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// (the grid is rounded up with a ceiling division and the kernels bounds-check,
// so the matrix dimensions do not have to be multiples of DEFAULT_DIM)
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
cudaMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTIII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost);
C = hostC;
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if ((nrhs!=1)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n");
return;
}
// numDCOSRows=numDCOSColumns = numARows;
// numCRows = numARows;
//
// numCColumns = numAColumns;
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i <numDCOSRows ; i++){
// for (int j = 0; j < numDCOSColumns; j++){
// //hostB[i * numDCOSColumns+ j] = i + j* numAColumns;
// //hostB[i * numDCOSColumns + j] = 1;
// //cosvalx[i * numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / numBColumns);
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
// if (i == 0) {
// pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / numDCOSColumns);
// //pointer[i + j* numDCOSColumns] = 1;
// }
// else if (i != 0) {
// pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(2.0 / numDCOSColumns);
// //pointer[i + j* numDCOSColumns] = 2;
// }
// }
// }
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DCTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
//cudaDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n");
return;
}
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
//Inverse Discrete Cosine Transform in Columns wise
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i < numBRows; i++){
// for (int j = 0; j < numBColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// if (j == 0) {
// hostB[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*PI_d*j)*sqrt(1.0 / numBColumns);
// }
// else if (j != 0) {
// hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(2.0 / numBColumns);
// }
//
// }
// }
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
}
|
729aabc8b11344dbbc26c1686679fe7c8590ccdd.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native { namespace {
enum class EqOpType {EQ, NE};
template<typename scalar_t>
struct CompareEqFunctor{
CompareEqFunctor(EqOpType op): op_(op) {}
const EqOpType op_;
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
if (op_ == EqOpType::EQ) {
return a == b;
} else { //NE
return a != b;
}
}
};
}
C10_NOINLINE void compare_eq_ne_kernel(TensorIteratorBase &iter, EqOpType op) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBFloat16, kBool,
iter.common_dtype(), "compare_eq_ne_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, CompareEqFunctor<scalar_t>(op));
});
}
void eq_kernel_cuda(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::EQ);
}
void ne_kernel_cuda(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::NE);
}
REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda);
REGISTER_DISPATCH(ne_stub, &ne_kernel_cuda);
} // namespace at::native
| 729aabc8b11344dbbc26c1686679fe7c8590ccdd.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native { namespace {
enum class EqOpType {EQ, NE};
template<typename scalar_t>
struct CompareEqFunctor{
CompareEqFunctor(EqOpType op): op_(op) {}
const EqOpType op_;
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
if (op_ == EqOpType::EQ) {
return a == b;
} else { //NE
return a != b;
}
}
};
}
C10_NOINLINE void compare_eq_ne_kernel(TensorIteratorBase &iter, EqOpType op) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBFloat16, kBool,
iter.common_dtype(), "compare_eq_ne_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, CompareEqFunctor<scalar_t>(op));
});
}
void eq_kernel_cuda(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::EQ);
}
void ne_kernel_cuda(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::NE);
}
REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda);
REGISTER_DISPATCH(ne_stub, &ne_kernel_cuda);
} // namespace at::native
|
1cc2d7700b122583861c4ab5ea9853d318834d5e.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <xgboost/logging.h>
#include <xgboost/learner.h>
#include <string>
#include "../helpers.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/data/device_adapter.cuh"
#include "test_predictor.h"
namespace xgboost {
namespace predictor {
TEST(GPUPredictor, Basic) {
auto cpu_lparam = CreateEmptyGenericParam(-1);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
LearnerModelParam param;
param.num_feature = n_col;
param.num_output_group = 1;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model = CreateTestModel(¶m, &ctx);
// Test predict batch
PredictionCacheEntry gpu_out_predictions;
PredictionCacheEntry cpu_out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &gpu_out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0);
cpu_predictor->InitOutPredictions(dmat->Info(), &cpu_out_predictions.predictions, model);
cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
}
}
TEST(GPUPredictor, EllpackBasic) {
size_t constexpr kCols {8};
for (size_t bins = 2; bins < 258; bins += 16) {
size_t rows = bins * 16;
auto p_m = RandomDataGenerator{rows, kCols, 0.0}
.Bins(bins)
.Device(0)
.GenerateDeviceDMatrix(true);
ASSERT_FALSE(p_m->PageExists<SparsePage>());
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m);
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m);
}
}
TEST(GPUPredictor, EllpackTraining) {
size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 };
auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
.Bins(kBins)
.Device(0)
.GenerateDeviceDMatrix(true);
HostDeviceVector<float> storage(kRows * kCols);
auto columnar = RandomDataGenerator{kRows, kCols, 0.0}
.Device(0)
.GenerateArrayInterface(&storage);
auto adapter = data::CupyAdapter(columnar);
std::shared_ptr<DMatrix> p_full {
DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1)
};
TestTrainingPrediction(kRows, kBins, "gpu_hist", p_full, p_ellpack);
}
TEST(GPUPredictor, ExternalMemoryTest) {
auto lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam param;
param.num_feature = 5;
const int n_classes = 3;
param.num_output_group = n_classes;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model = CreateTestModel(¶m, &ctx, n_classes);
std::vector<std::unique_ptr<DMatrix>> dmats;
dmats.push_back(CreateSparsePageDMatrix(400));
dmats.push_back(CreateSparsePageDMatrix(800));
dmats.push_back(CreateSparsePageDMatrix(8000));
for (const auto& dmat: dmats) {
dmat->Info().base_margin_ = decltype(dmat->Info().base_margin_){
{dmat->Info().num_row_, static_cast<size_t>(n_classes)}, 0};
dmat->Info().base_margin_.Data()->Fill(0.5);
PredictionCacheEntry out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector();
for (int i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 2.0);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.5);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.5);
}
}
}
TEST(GPUPredictor, InplacePredictCupy) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, InplacePredictCuDF) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
std::vector<HostDeviceVector<float>> storage(kCols);
auto interface_str = gen.GenerateColumnarArrayInterface(&storage);
auto x = std::make_shared<data::CudfAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT
int32_t n_gpus = xgboost::common::AllVisibleGPUs();
if (n_gpus <= 1) {
LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped.";
return;
}
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(1);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 1);
EXPECT_THROW(TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0),
dmlc::Error);
}
TEST(GpuPredictor, LesserFeatures) {
TestPredictionWithLesserFeatures("gpu_predictor");
}
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
hipSetDevice(0);
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model(¶m, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
HostDeviceVector<float> predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
auto& phis = predictions.HostVector();
EXPECT_EQ(phis[0], 0.0);
EXPECT_EQ(phis[1], param.base_score);
EXPECT_EQ(phis[2], 0.0);
EXPECT_EQ(phis[3], param.base_score);
EXPECT_EQ(phis[4], 0.0);
EXPECT_EQ(phis[5], param.base_score);
}
TEST(GPUPredictor, Shap) {
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model(¶m, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0);
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
auto cpu_lparam = CreateEmptyGenericParam(-1);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
HostDeviceVector<float> predictions;
HostDeviceVector<float> cpu_predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_predictions, model);
auto& phis = predictions.HostVector();
auto& cpu_phis = cpu_predictions.HostVector();
for (auto i = 0ull; i < phis.size(); i++) {
EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3);
}
}
TEST(GPUPredictor, IterationRange) {
TestIterationRange("gpu_predictor");
}
TEST(GPUPredictor, CategoricalPrediction) {
TestCategoricalPrediction("gpu_predictor");
}
TEST(GPUPredictor, CategoricalPredictLeaf) {
TestCategoricalPredictLeaf(StringView{"gpu_predictor"});
}
TEST(GPUPredictor, PredictLeafBasic) {
size_t constexpr kRows = 5, kCols = 5;
auto dmat = RandomDataGenerator(kRows, kCols, 0).Device(0).GenerateDMatrix();
auto lparam = CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam param;
param.num_feature = kCols;
param.base_score = 0.0;
param.num_output_group = 1;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model = CreateTestModel(¶m, &ctx);
HostDeviceVector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
auto const& h_leaf_out_predictions = leaf_out_predictions.ConstHostVector();
for (auto v : h_leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
}
TEST(GPUPredictor, Sparse) {
TestSparsePrediction(0.2, "gpu_predictor");
TestSparsePrediction(0.8, "gpu_predictor");
}
} // namespace predictor
} // namespace xgboost
| 1cc2d7700b122583861c4ab5ea9853d318834d5e.cu | /*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <xgboost/logging.h>
#include <xgboost/learner.h>
#include <string>
#include "../helpers.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/data/device_adapter.cuh"
#include "test_predictor.h"
namespace xgboost {
namespace predictor {
TEST(GPUPredictor, Basic) {
auto cpu_lparam = CreateEmptyGenericParam(-1);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
LearnerModelParam param;
param.num_feature = n_col;
param.num_output_group = 1;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model = CreateTestModel(¶m, &ctx);
// Test predict batch
PredictionCacheEntry gpu_out_predictions;
PredictionCacheEntry cpu_out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &gpu_out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0);
cpu_predictor->InitOutPredictions(dmat->Info(), &cpu_out_predictions.predictions, model);
cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
}
}
TEST(GPUPredictor, EllpackBasic) {
size_t constexpr kCols {8};
for (size_t bins = 2; bins < 258; bins += 16) {
size_t rows = bins * 16;
auto p_m = RandomDataGenerator{rows, kCols, 0.0}
.Bins(bins)
.Device(0)
.GenerateDeviceDMatrix(true);
ASSERT_FALSE(p_m->PageExists<SparsePage>());
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m);
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m);
}
}
TEST(GPUPredictor, EllpackTraining) {
size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 };
auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
.Bins(kBins)
.Device(0)
.GenerateDeviceDMatrix(true);
HostDeviceVector<float> storage(kRows * kCols);
auto columnar = RandomDataGenerator{kRows, kCols, 0.0}
.Device(0)
.GenerateArrayInterface(&storage);
auto adapter = data::CupyAdapter(columnar);
std::shared_ptr<DMatrix> p_full {
DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1)
};
TestTrainingPrediction(kRows, kBins, "gpu_hist", p_full, p_ellpack);
}
TEST(GPUPredictor, ExternalMemoryTest) {
auto lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam param;
param.num_feature = 5;
const int n_classes = 3;
param.num_output_group = n_classes;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model = CreateTestModel(¶m, &ctx, n_classes);
std::vector<std::unique_ptr<DMatrix>> dmats;
dmats.push_back(CreateSparsePageDMatrix(400));
dmats.push_back(CreateSparsePageDMatrix(800));
dmats.push_back(CreateSparsePageDMatrix(8000));
for (const auto& dmat: dmats) {
dmat->Info().base_margin_ = decltype(dmat->Info().base_margin_){
{dmat->Info().num_row_, static_cast<size_t>(n_classes)}, 0};
dmat->Info().base_margin_.Data()->Fill(0.5);
PredictionCacheEntry out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector();
for (int i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 2.0);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.5);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.5);
}
}
}
TEST(GPUPredictor, InplacePredictCupy) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, InplacePredictCuDF) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
std::vector<HostDeviceVector<float>> storage(kCols);
auto interface_str = gen.GenerateColumnarArrayInterface(&storage);
auto x = std::make_shared<data::CudfAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT
int32_t n_gpus = xgboost::common::AllVisibleGPUs();
if (n_gpus <= 1) {
LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped.";
return;
}
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(1);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 1);
EXPECT_THROW(TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0),
dmlc::Error);
}
TEST(GpuPredictor, LesserFeatures) {
TestPredictionWithLesserFeatures("gpu_predictor");
}
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
cudaSetDevice(0);
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model(¶m, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
HostDeviceVector<float> predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
auto& phis = predictions.HostVector();
EXPECT_EQ(phis[0], 0.0);
EXPECT_EQ(phis[1], param.base_score);
EXPECT_EQ(phis[2], 0.0);
EXPECT_EQ(phis[3], param.base_score);
EXPECT_EQ(phis[4], 0.0);
EXPECT_EQ(phis[5], param.base_score);
}
TEST(GPUPredictor, Shap) {
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model(¶m, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0);
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
auto cpu_lparam = CreateEmptyGenericParam(-1);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
HostDeviceVector<float> predictions;
HostDeviceVector<float> cpu_predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_predictions, model);
auto& phis = predictions.HostVector();
auto& cpu_phis = cpu_predictions.HostVector();
for (auto i = 0ull; i < phis.size(); i++) {
EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3);
}
}
TEST(GPUPredictor, IterationRange) {
TestIterationRange("gpu_predictor");
}
TEST(GPUPredictor, CategoricalPrediction) {
TestCategoricalPrediction("gpu_predictor");
}
TEST(GPUPredictor, CategoricalPredictLeaf) {
TestCategoricalPredictLeaf(StringView{"gpu_predictor"});
}
TEST(GPUPredictor, PredictLeafBasic) {
size_t constexpr kRows = 5, kCols = 5;
auto dmat = RandomDataGenerator(kRows, kCols, 0).Device(0).GenerateDMatrix();
auto lparam = CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam param;
param.num_feature = kCols;
param.base_score = 0.0;
param.num_output_group = 1;
GenericParameter ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model = CreateTestModel(¶m, &ctx);
HostDeviceVector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
auto const& h_leaf_out_predictions = leaf_out_predictions.ConstHostVector();
for (auto v : h_leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
}
TEST(GPUPredictor, Sparse) {
TestSparsePrediction(0.2, "gpu_predictor");
TestSparsePrediction(0.8, "gpu_predictor");
}
} // namespace predictor
} // namespace xgboost
|