hip_filename (string, lengths 5-84) | hip_content (string, lengths 79-9.69M) | cuda_filename (string, lengths 4-83) | cuda_content (string, lengths 19-9.69M) |
---|---|---|---|
26037774a04d8a577058170a5ca350fb4a843f2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaSAnchorBackPropagateSSD_NegSamples_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
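// Thread-block shapes (blocks_) and square matrix sizes (matrices_) swept by this auto-generated benchmark harness.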
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *inputCls = NULL;
hipMalloc(&inputCls, XSIZE*YSIZE);
float *diffOutputsCls = NULL;
hipMalloc(&diffOutputsCls, XSIZE*YSIZE);
const float *confSamples = NULL;
hipMalloc(&confSamples, XSIZE*YSIZE);
const int *keySamples = NULL;
hipMalloc(&keySamples, XSIZE*YSIZE);
const int nbSamples = 1;
const int nbPositive = 1;
const unsigned int nbAnchors = 1;
const unsigned int outputsHeight = 1;
const unsigned int outputsWidth = 1;
const unsigned int batchSize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
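// The two loops above pad XSIZE/YSIZE up to the next multiple of the block dimensions,
// so the grid computed below covers the whole matrix.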
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cudaSAnchorBackPropagateSSD_NegSamples_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputCls,diffOutputsCls,confSamples,keySamples,nbSamples,nbPositive,nbAnchors,outputsHeight,outputsWidth,batchSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cudaSAnchorBackPropagateSSD_NegSamples_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputCls,diffOutputsCls,confSamples,keySamples,nbSamples,nbPositive,nbAnchors,outputsHeight,outputsWidth,batchSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cudaSAnchorBackPropagateSSD_NegSamples_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputCls,diffOutputsCls,confSamples,keySamples,nbSamples,nbPositive,nbAnchors,outputsHeight,outputsWidth,batchSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 26037774a04d8a577058170a5ca350fb4a843f2c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaSAnchorBackPropagateSSD_NegSamples_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *inputCls = NULL;
cudaMalloc(&inputCls, XSIZE*YSIZE);
float *diffOutputsCls = NULL;
cudaMalloc(&diffOutputsCls, XSIZE*YSIZE);
const float *confSamples = NULL;
cudaMalloc(&confSamples, XSIZE*YSIZE);
const int *keySamples = NULL;
cudaMalloc(&keySamples, XSIZE*YSIZE);
const int nbSamples = 1;
const int nbPositive = 1;
const unsigned int nbAnchors = 1;
const unsigned int outputsHeight = 1;
const unsigned int outputsWidth = 1;
const unsigned int batchSize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
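// cudaFree(0) below is a common idiom to force lazy CUDA context/runtime initialization
// before the first (untimed) launch, keeping setup cost out of the timed loop further down.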
cudaFree(0);
cudaSAnchorBackPropagateSSD_NegSamples_kernel<<<gridBlock,threadBlock>>>(inputCls,diffOutputsCls,confSamples,keySamples,nbSamples,nbPositive,nbAnchors,outputsHeight,outputsWidth,batchSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaSAnchorBackPropagateSSD_NegSamples_kernel<<<gridBlock,threadBlock>>>(inputCls,diffOutputsCls,confSamples,keySamples,nbSamples,nbPositive,nbAnchors,outputsHeight,outputsWidth,batchSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaSAnchorBackPropagateSSD_NegSamples_kernel<<<gridBlock,threadBlock>>>(inputCls,diffOutputsCls,confSamples,keySamples,nbSamples,nbPositive,nbAnchors,outputsHeight,outputsWidth,batchSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ed31371e443a39ba92519bb4c6872779f36748d6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* @file mtgp32-cuda.cu
*
* @brief Sample Program for CUDA 3.2 and 4.0
*
* MTGP32-11213
* This program generates 32-bit unsigned integers.
* The period of generated integers is 2<sup>11213</sup>-1.
*
* This also generates single precision floating point numbers
* uniformly distributed in the range [1, 2). (float r; 1.0 <= r < 2.0)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <stdlib.h>
#include "mtgp-util.cuh"
#include "mtgp32-fast.h"
#define MTGPDC_MEXP 11213
#define MTGPDC_N 351
#define MTGPDC_FLOOR_2P 256
#define MTGPDC_CEIL_2P 512
#define MTGPDC_PARAM_TABLE mtgp32dc_params_fast_11213
#define MEXP 11213
#define THREAD_NUM MTGPDC_FLOOR_2P
#define LARGE_SIZE (THREAD_NUM * 3)
#define BLOCK_NUM_MAX 200
#define TBL_SIZE 16
#define N MTGPDC_N
extern mtgp32_params_fast_t mtgp32dc_params_fast_11213[];
/**
* kernel I/O
* This structure must be initialized before first use.
*/
struct mtgp32_kernel_status_t {
uint32_t status[MTGPDC_N];
};
/*
* Generator Parameters.
*/
__constant__ unsigned int pos_tbl[BLOCK_NUM_MAX];
__constant__ uint32_t param_tbl[BLOCK_NUM_MAX][TBL_SIZE];
__constant__ uint32_t temper_tbl[BLOCK_NUM_MAX][TBL_SIZE];
__constant__ uint32_t single_temper_tbl[BLOCK_NUM_MAX][TBL_SIZE];
__constant__ uint32_t sh1_tbl[BLOCK_NUM_MAX];
__constant__ uint32_t sh2_tbl[BLOCK_NUM_MAX];
__constant__ uint32_t mask[1];
/**
* Shared memory
* The generator's internal status vector.
*/
__shared__ uint32_t status[LARGE_SIZE];
/**
* The function of the recursion formula calculation.
*
* @param[in] X1 the farthest part of state array.
* @param[in] X2 the second farthest part of state array.
* @param[in] Y a part of state array.
* @param[in] bid block id.
* @return output
*/
__device__ uint32_t para_rec(uint32_t X1, uint32_t X2, uint32_t Y, int bid) {
uint32_t X = (X1 & mask[0]) ^ X2;
uint32_t MAT;
X ^= X << sh1_tbl[bid];
Y = X ^ (Y >> sh2_tbl[bid]);
MAT = param_tbl[bid][Y & 0x0f];
return Y ^ MAT;
}
/**
* The tempering function.
*
* @param[in] V the output value should be tempered.
* @param[in] T the tempering helper value.
* @param[in] bid block id.
* @return the tempered value.
*/
__device__ uint32_t temper(uint32_t V, uint32_t T, int bid) {
uint32_t MAT;
T ^= T >> 16;
T ^= T >> 8;
MAT = temper_tbl[bid][T & 0x0f];
return V ^ MAT;
}
/**
* The tempering and converting function.
* By using the preset table, converting to IEEE format
* and tempering are done simultaneously.
*
* @param[in] V the output value should be tempered.
* @param[in] T the tempering helper value.
* @param[in] bid block id.
* @return the tempered and converted value.
*/
__device__ uint32_t temper_single(uint32_t V, uint32_t T, int bid) {
uint32_t MAT;
uint32_t r;
T ^= T >> 16;
T ^= T >> 8;
MAT = single_temper_tbl[bid][T & 0x0f];
r = (V >> 9) ^ MAT;
return r;
}
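// Note: (V >> 9) keeps 23 random mantissa bits; the single-precision temper table is
// expected to carry the exponent bits of 1.0f (0x3f800000), which is how the header's
// advertised [1, 2) output range is obtained.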
/**
* Read the internal state vector from kernel I/O data, and
* put them into shared memory.
*
* @param[out] status shared memory.
* @param[in] d_status kernel I/O data
* @param[in] bid block id
* @param[in] tid thread id
*/
__device__ void status_read(uint32_t status[LARGE_SIZE],
const mtgp32_kernel_status_t *d_status,
int bid,
int tid) {
status[LARGE_SIZE - N + tid] = d_status[bid].status[tid];
if (tid < N - THREAD_NUM) {
status[LARGE_SIZE - N + THREAD_NUM + tid]
= d_status[bid].status[THREAD_NUM + tid];
}
__syncthreads();
}
/**
* Read the internal state vector from shared memory, and
* write them into kernel I/O data.
*
* @param[out] d_status kernel I/O data
* @param[in] status shared memory.
* @param[in] bid block id
* @param[in] tid thread id
*/
__device__ void status_write(mtgp32_kernel_status_t *d_status,
const uint32_t status[LARGE_SIZE],
int bid,
int tid) {
d_status[bid].status[tid] = status[LARGE_SIZE - N + tid];
if (tid < N - THREAD_NUM) {
d_status[bid].status[THREAD_NUM + tid]
= status[4 * THREAD_NUM - N + tid];
}
__syncthreads();
}
/**
* kernel function.
* This function generates 32-bit unsigned integers in d_data
*
* @param[in,out] d_status kernel I/O data
* @param[out] d_data output
* @param[in] size number of output data requested.
*/
__global__ void mtgp32_uint32_kernel(mtgp32_kernel_status_t* d_status,
uint32_t* d_data, int size) {
const int bid = blockIdx.x;
const int tid = threadIdx.x;
int pos = pos_tbl[bid];
uint32_t r;
uint32_t o;
// copy status data from global memory to shared memory.
status_read(status, d_status, bid, tid);
// main loop
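// Each pass of this loop produces LARGE_SIZE (= 3 * THREAD_NUM) outputs per block:
// the three para_rec/temper sub-steps below each generate THREAD_NUM values.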
for (int i = 0; i < size; i += LARGE_SIZE) {
#if defined(DEBUG) && defined(__DEVICE_EMULATION__)
if ((i == 0) && (bid == 0) && (tid <= 1)) {
printf("status[LARGE_SIZE - N + tid]:%08x\n",
status[LARGE_SIZE - N + tid]);
printf("status[LARGE_SIZE - N + tid + 1]:%08x\n",
status[LARGE_SIZE - N + tid + 1]);
printf("status[LARGE_SIZE - N + tid + pos]:%08x\n",
status[LARGE_SIZE - N + tid + pos]);
printf("sh1:%d\n", sh1_tbl[bid]);
printf("sh2:%d\n", sh2_tbl[bid]);
printf("mask:%08x\n", mask[0]);
for (int j = 0; j < 16; j++) {
printf("tbl[%d]:%08x\n", j, param_tbl[0][j]);
}
}
#endif
r = para_rec(status[LARGE_SIZE - N + tid],
status[LARGE_SIZE - N + tid + 1],
status[LARGE_SIZE - N + tid + pos],
bid);
status[tid] = r;
#if defined(DEBUG) && defined(__DEVICE_EMULATION__)
if ((i == 0) && (bid == 0) && (tid <= 1)) {
printf("status[tid]:%08x\n", status[tid]);
}
#endif
o = temper(r, status[LARGE_SIZE - N + tid + pos - 1], bid);
#if defined(DEBUG) && defined(__DEVICE_EMULATION__)
if ((i == 0) && (bid == 0) && (tid <= 1)) {
printf("r:%08" PRIx32 "\n", r);
}
#endif
d_data[size * bid + i + tid] = o;
__syncthreads();
r = para_rec(status[(4 * THREAD_NUM - N + tid) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + 1) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + pos) % LARGE_SIZE],
bid);
status[tid + THREAD_NUM] = r;
o = temper(r,
status[(4 * THREAD_NUM - N + tid + pos - 1) % LARGE_SIZE],
bid);
d_data[size * bid + THREAD_NUM + i + tid] = o;
__syncthreads();
r = para_rec(status[2 * THREAD_NUM - N + tid],
status[2 * THREAD_NUM - N + tid + 1],
status[2 * THREAD_NUM - N + tid + pos],
bid);
status[tid + 2 * THREAD_NUM] = r;
o = temper(r, status[tid + pos - 1 + 2 * THREAD_NUM - N], bid);
d_data[size * bid + 2 * THREAD_NUM + i + tid] = o;
__syncthreads();
}
// write back status for next call
status_write(d_status, status, bid, tid);
}
/**
* kernel function.
* This function generates single precision floating point numbers in d_data.
*
* @param[in,out] d_status kernel I/O data
* @param[out] d_data output. IEEE single precision format.
* @param[in] size number of output data requested.
*/
__global__ void mtgp32_single_kernel(mtgp32_kernel_status_t* d_status,
uint32_t* d_data, int size)
{
const int bid = blockIdx.x;
const int tid = threadIdx.x;
int pos = pos_tbl[bid];
uint32_t r;
uint32_t o;
// copy status data from global memory to shared memory.
status_read(status, d_status, bid, tid);
// main loop
for (int i = 0; i < size; i += LARGE_SIZE) {
r = para_rec(status[LARGE_SIZE - N + tid],
status[LARGE_SIZE - N + tid + 1],
status[LARGE_SIZE - N + tid + pos],
bid);
status[tid] = r;
o = temper_single(r, status[LARGE_SIZE - N + tid + pos - 1], bid);
d_data[size * bid + i + tid] = o;
__syncthreads();
r = para_rec(status[(4 * THREAD_NUM - N + tid) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + 1) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + pos) % LARGE_SIZE],
bid);
status[tid + THREAD_NUM] = r;
o = temper_single(
r,
status[(4 * THREAD_NUM - N + tid + pos - 1) % LARGE_SIZE],
bid);
d_data[size * bid + THREAD_NUM + i + tid] = o;
__syncthreads();
r = para_rec(status[2 * THREAD_NUM - N + tid],
status[2 * THREAD_NUM - N + tid + 1],
status[2 * THREAD_NUM - N + tid + pos],
bid);
status[tid + 2 * THREAD_NUM] = r;
o = temper_single(r,
status[tid + pos - 1 + 2 * THREAD_NUM - N],
bid);
d_data[size * bid + 2 * THREAD_NUM + i + tid] = o;
__syncthreads();
}
// write back status for next call
status_write(d_status, status, bid, tid);
}
/**
* This function initializes kernel I/O data.
* @param d_status output kernel I/O data.
* @param params MTGP32 parameters. needed for the initialization.
*/
void make_kernel_data32(mtgp32_kernel_status_t * d_status,
mtgp32_params_fast_t params[],
int block_num)
{
int i;
mtgp32_kernel_status_t* h_status
= (mtgp32_kernel_status_t *) malloc(
sizeof(mtgp32_kernel_status_t) * block_num);
if (h_status == NULL) {
printf("failure in allocating host memory for kernel I/O data.\n");
exit(8);
}
for (i = 0; i < block_num; i++) {
mtgp32_init_state(&(h_status[i].status[0]), &params[i], i + 1);
}
#if defined(DEBUG)
printf("h_status[0].status[0]:%08"PRIx32"\n", h_status[0].status[0]);
printf("h_status[0].status[1]:%08"PRIx32"\n", h_status[0].status[1]);
printf("h_status[0].status[2]:%08"PRIx32"\n", h_status[0].status[2]);
printf("h_status[0].status[3]:%08"PRIx32"\n", h_status[0].status[3]);
#endif
ccudaMemcpy(d_status, h_status,
sizeof(mtgp32_kernel_status_t) * block_num,
hipMemcpyHostToDevice);
free(h_status);
}
/**
* This function sets constants in device memory.
* @param[in] params input, MTGP32 parameters.
*/
void make_constant(const mtgp32_params_fast_t params[],
int block_num) {
const int size1 = sizeof(uint32_t) * block_num;
const int size2 = sizeof(uint32_t) * block_num * TBL_SIZE;
uint32_t *h_pos_tbl;
uint32_t *h_sh1_tbl;
uint32_t *h_sh2_tbl;
uint32_t *h_param_tbl;
uint32_t *h_temper_tbl;
uint32_t *h_single_temper_tbl;
uint32_t *h_mask;
h_pos_tbl = (uint32_t *)malloc(size1);
h_sh1_tbl = (uint32_t *)malloc(size1);
h_sh2_tbl = (uint32_t *)malloc(size1);
h_param_tbl = (uint32_t *)malloc(size2);
h_temper_tbl = (uint32_t *)malloc(size2);
h_single_temper_tbl = (uint32_t *)malloc(size2);
h_mask = (uint32_t *)malloc(sizeof(uint32_t));
if (h_pos_tbl == NULL
|| h_sh1_tbl == NULL
|| h_sh2_tbl == NULL
|| h_param_tbl == NULL
|| h_temper_tbl == NULL
|| h_single_temper_tbl == NULL
|| h_mask == NULL
) {
printf("failure in allocating host memory for constant table.\n");
exit(1);
}
h_mask[0] = params[0].mask;
for (int i = 0; i < block_num; i++) {
h_pos_tbl[i] = params[i].pos;
h_sh1_tbl[i] = params[i].sh1;
h_sh2_tbl[i] = params[i].sh2;
for (int j = 0; j < TBL_SIZE; j++) {
h_param_tbl[i * TBL_SIZE + j] = params[i].tbl[j];
h_temper_tbl[i * TBL_SIZE + j] = params[i].tmp_tbl[j];
h_single_temper_tbl[i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j];
}
}
ccudaMemcpyToSymbol(pos_tbl, h_pos_tbl, size1);
ccudaMemcpyToSymbol(sh1_tbl, h_sh1_tbl, size1);
ccudaMemcpyToSymbol(sh2_tbl, h_sh2_tbl, size1);
ccudaMemcpyToSymbol(param_tbl, h_param_tbl, size2);
ccudaMemcpyToSymbol(temper_tbl, h_temper_tbl, size2);
ccudaMemcpyToSymbol(single_temper_tbl, h_single_temper_tbl, size2);
ccudaMemcpyToSymbol(mask, h_mask, sizeof(uint32_t));
free(h_pos_tbl);
free(h_sh1_tbl);
free(h_sh2_tbl);
free(h_param_tbl);
free(h_temper_tbl);
free(h_single_temper_tbl);
free(h_mask);
}
/**
* host function.
* This function calls corresponding kernel function.
*
* @param[in] d_status kernel I/O data.
* @param[in] num_data number of data to be generated.
*/
void make_uint32_random(mtgp32_kernel_status_t* d_status,
int num_data,
int block_num) {
uint32_t* d_data;
uint32_t* h_data;
hipError_t e;
float gputime;
hipEvent_t start;
hipEvent_t end;
printf("generating 32-bit unsigned random numbers.\n");
ccudaMalloc((void**)&d_data, sizeof(uint32_t) * num_data);
/* cutCreateTimer(&timer); */
ccudaEventCreate(&start);
ccudaEventCreate(&end);
h_data = (uint32_t *) malloc(sizeof(uint32_t) * num_data);
if (h_data == NULL) {
printf("failure in allocating host memory for output data.\n");
exit(1);
}
/* cutStartTimer(timer); */
ccudaEventRecord(start, 0);
if (hipGetLastError() != hipSuccess) {
printf("error has been occured before kernel call.\n");
exit(1);
}
/* kernel call */
hipLaunchKernelGGL(( mtgp32_uint32_kernel), dim3(block_num), dim3(THREAD_NUM), 0, 0,
d_status, d_data, num_data / block_num);
hipDeviceSynchronize();
e = hipGetLastError();
if (e != hipSuccess) {
printf("failure in kernel call.\n%s\n", hipGetErrorString(e));
exit(1);
}
/* ccutStopTimer(timer); */
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaMemcpy(h_data, d_data,
sizeof(uint32_t) * num_data, hipMemcpyDeviceToHost);
/* gputime = cutGetTimerValue(timer); */
ccudaEventElapsedTime(&gputime, start, end);
print_uint32_array(h_data, num_data, block_num);
printf("generated numbers: %d\n", num_data);
printf("Processing time: %f (ms)\n", gputime);
printf("Samples per second: %E \n", num_data / (gputime * 0.001));
/* ccutDeleteTimer(timer); */
ccudaEventDestroy(start);
ccudaEventDestroy(end);
//free memories
free(h_data);
ccudaFree(d_data);
}
/**
* host function.
* This function calls corresponding kernel function.
*
* @param[in] d_status kernel I/O data.
* @param[in] num_data number of data to be generated.
*/
void make_single_random(mtgp32_kernel_status_t* d_status,
int num_data,
int block_num) {
uint32_t* d_data;
float* h_data;
hipError_t e;
float gputime;
hipEvent_t start;
hipEvent_t end;
printf("generating single precision floating point random numbers.\n");
ccudaMalloc((void**)&d_data, sizeof(uint32_t) * num_data);
/* ccutCreateTimer(&timer); */
ccudaEventCreate(&start);
ccudaEventCreate(&end);
h_data = (float *) malloc(sizeof(float) * num_data);
if (h_data == NULL) {
printf("failure in allocating host memory for output data.\n");
exit(1);
}
/* ccutStartTimer(timer); */
ccudaEventRecord(start, 0);
if (hipGetLastError() != hipSuccess) {
printf("error has been occured before kernel call.\n");
exit(1);
}
/* kernel call */
hipLaunchKernelGGL(( mtgp32_single_kernel), dim3(block_num), dim3(THREAD_NUM) , 0, 0,
d_status, d_data, num_data / block_num);
hipDeviceSynchronize();
e = hipGetLastError();
if (e != hipSuccess) {
printf("failure in kernel call.\n%s\n", hipGetErrorString(e));
exit(1);
}
/* ccutStopTimer(timer); */
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaMemcpy(h_data, d_data, sizeof(uint32_t) * num_data,
hipMemcpyDeviceToHost);
/* gputime = cutGetTimerValue(timer); */
ccudaEventElapsedTime(&gputime, start, end);
print_float_array(h_data, num_data, block_num);
printf("generated numbers: %d\n", num_data);
printf("Processing time: %f (ms)\n", gputime);
printf("Samples per second: %E \n", num_data / (gputime * 0.001));
/* ccutDeleteTimer(timer); */
ccudaEventDestroy(start);
ccudaEventDestroy(end);
//free memories
free(h_data);
ccudaFree(d_data);
}
int main(int argc, char** argv)
{
// LARGE_SIZE is a multiple of 16
int num_data = 10000000;
int block_num;
int num_unit;
int r;
int device = 0;
mtgp32_kernel_status_t *d_status;
int mb, mp;
ccudaSetDevice(device);
if (argc >= 2) {
errno = 0;
block_num = strtol(argv[1], NULL, 10);
if (errno) {
printf("%s number_of_block number_of_output\n", argv[0]);
return 1;
}
if (block_num < 1 || block_num > BLOCK_NUM_MAX) {
printf("%s block_num should be between 1 and %d\n",
argv[0], BLOCK_NUM_MAX);
return 1;
}
errno = 0;
num_data = strtol(argv[2], NULL, 10);
if (errno) {
printf("%s number_of_block number_of_output\n", argv[0]);
return 1;
}
argc -= 2;
argv += 2;
} else {
printf("%s number_of_block number_of_output\n", argv[0]);
block_num = get_suitable_block_num(device,
&mb,
&mp,
sizeof(uint32_t),
THREAD_NUM,
LARGE_SIZE);
if (block_num <= 0) {
printf("can't calculate sutable number of blocks.\n");
return 1;
}
printf("the suitable number of blocks for device 0 "
"will be multiple of %d, or multiple of %d\n", block_num,
(mb - 1) * mp);
return 1;
}
num_unit = LARGE_SIZE * block_num;
ccudaMalloc((void**)&d_status,
sizeof(mtgp32_kernel_status_t) * block_num);
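// Round num_data up to the next multiple of num_unit (= LARGE_SIZE * block_num)
// so that every block generates complete batches of outputs.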
r = num_data % num_unit;
if (r != 0) {
num_data = num_data + num_unit - r;
}
make_constant(MTGPDC_PARAM_TABLE, block_num);
make_kernel_data32(d_status, MTGPDC_PARAM_TABLE, block_num);
make_uint32_random(d_status, num_data, block_num);
make_single_random(d_status, num_data, block_num);
ccudaFree(d_status);
}
| ed31371e443a39ba92519bb4c6872779f36748d6.cu | /*
* @file mtgp32-cuda.cu
*
* @brief Sample Program for CUDA 3.2 and 4.0
*
* MTGP32-11213
* This program generates 32-bit unsigned integers.
* The period of generated integers is 2<sup>11213</sup>-1.
*
* This also generates single precision floating point numbers
* uniformly distributed in the range [1, 2). (float r; 1.0 <= r < 2.0)
*/
#include <stdio.h>
#include <cuda.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <stdlib.h>
#include "mtgp-util.cuh"
#include "mtgp32-fast.h"
#define MTGPDC_MEXP 11213
#define MTGPDC_N 351
#define MTGPDC_FLOOR_2P 256
#define MTGPDC_CEIL_2P 512
#define MTGPDC_PARAM_TABLE mtgp32dc_params_fast_11213
#define MEXP 11213
#define THREAD_NUM MTGPDC_FLOOR_2P
#define LARGE_SIZE (THREAD_NUM * 3)
#define BLOCK_NUM_MAX 200
#define TBL_SIZE 16
#define N MTGPDC_N
extern mtgp32_params_fast_t mtgp32dc_params_fast_11213[];
/**
* kernel I/O
* This structure must be initialized before first use.
*/
struct mtgp32_kernel_status_t {
uint32_t status[MTGPDC_N];
};
/*
* Generator Parameters.
*/
__constant__ unsigned int pos_tbl[BLOCK_NUM_MAX];
__constant__ uint32_t param_tbl[BLOCK_NUM_MAX][TBL_SIZE];
__constant__ uint32_t temper_tbl[BLOCK_NUM_MAX][TBL_SIZE];
__constant__ uint32_t single_temper_tbl[BLOCK_NUM_MAX][TBL_SIZE];
__constant__ uint32_t sh1_tbl[BLOCK_NUM_MAX];
__constant__ uint32_t sh2_tbl[BLOCK_NUM_MAX];
__constant__ uint32_t mask[1];
/**
* Shared memory
* The generator's internal status vector.
*/
__shared__ uint32_t status[LARGE_SIZE];
/**
* The function of the recursion formula calculation.
*
* @param[in] X1 the farthest part of state array.
* @param[in] X2 the second farthest part of state array.
* @param[in] Y a part of state array.
* @param[in] bid block id.
* @return output
*/
__device__ uint32_t para_rec(uint32_t X1, uint32_t X2, uint32_t Y, int bid) {
uint32_t X = (X1 & mask[0]) ^ X2;
uint32_t MAT;
X ^= X << sh1_tbl[bid];
Y = X ^ (Y >> sh2_tbl[bid]);
MAT = param_tbl[bid][Y & 0x0f];
return Y ^ MAT;
}
/**
* The tempering function.
*
* @param[in] V the output value should be tempered.
* @param[in] T the tempering helper value.
* @param[in] bid block id.
* @return the tempered value.
*/
__device__ uint32_t temper(uint32_t V, uint32_t T, int bid) {
uint32_t MAT;
T ^= T >> 16;
T ^= T >> 8;
MAT = temper_tbl[bid][T & 0x0f];
return V ^ MAT;
}
/**
* The tempering and converting function.
* By using the preset table, converting to IEEE format
* and tempering are done simultaneously.
*
* @param[in] V the output value should be tempered.
* @param[in] T the tempering helper value.
* @param[in] bid block id.
* @return the tempered and converted value.
*/
__device__ uint32_t temper_single(uint32_t V, uint32_t T, int bid) {
uint32_t MAT;
uint32_t r;
T ^= T >> 16;
T ^= T >> 8;
MAT = single_temper_tbl[bid][T & 0x0f];
r = (V >> 9) ^ MAT;
return r;
}
/**
* Read the internal state vector from kernel I/O data, and
* put them into shared memory.
*
* @param[out] status shared memory.
* @param[in] d_status kernel I/O data
* @param[in] bid block id
* @param[in] tid thread id
*/
__device__ void status_read(uint32_t status[LARGE_SIZE],
const mtgp32_kernel_status_t *d_status,
int bid,
int tid) {
status[LARGE_SIZE - N + tid] = d_status[bid].status[tid];
if (tid < N - THREAD_NUM) {
status[LARGE_SIZE - N + THREAD_NUM + tid]
= d_status[bid].status[THREAD_NUM + tid];
}
__syncthreads();
}
/**
* Read the internal state vector from shared memory, and
* write them into kernel I/O data.
*
* @param[out] d_status kernel I/O data
* @param[in] status shared memory.
* @param[in] bid block id
* @param[in] tid thread id
*/
__device__ void status_write(mtgp32_kernel_status_t *d_status,
const uint32_t status[LARGE_SIZE],
int bid,
int tid) {
d_status[bid].status[tid] = status[LARGE_SIZE - N + tid];
if (tid < N - THREAD_NUM) {
d_status[bid].status[THREAD_NUM + tid]
= status[4 * THREAD_NUM - N + tid];
}
__syncthreads();
}
/**
* kernel function.
* This function generates 32-bit unsigned integers in d_data
*
* @param[in,out] d_status kernel I/O data
* @param[out] d_data output
* @param[in] size number of output data requested.
*/
__global__ void mtgp32_uint32_kernel(mtgp32_kernel_status_t* d_status,
uint32_t* d_data, int size) {
const int bid = blockIdx.x;
const int tid = threadIdx.x;
int pos = pos_tbl[bid];
uint32_t r;
uint32_t o;
// copy status data from global memory to shared memory.
status_read(status, d_status, bid, tid);
// main loop
for (int i = 0; i < size; i += LARGE_SIZE) {
#if defined(DEBUG) && defined(__DEVICE_EMULATION__)
if ((i == 0) && (bid == 0) && (tid <= 1)) {
printf("status[LARGE_SIZE - N + tid]:%08x\n",
status[LARGE_SIZE - N + tid]);
printf("status[LARGE_SIZE - N + tid + 1]:%08x\n",
status[LARGE_SIZE - N + tid + 1]);
printf("status[LARGE_SIZE - N + tid + pos]:%08x\n",
status[LARGE_SIZE - N + tid + pos]);
printf("sh1:%d\n", sh1_tbl[bid]);
printf("sh2:%d\n", sh2_tbl[bid]);
printf("mask:%08x\n", mask[0]);
for (int j = 0; j < 16; j++) {
printf("tbl[%d]:%08x\n", j, param_tbl[0][j]);
}
}
#endif
r = para_rec(status[LARGE_SIZE - N + tid],
status[LARGE_SIZE - N + tid + 1],
status[LARGE_SIZE - N + tid + pos],
bid);
status[tid] = r;
#if defined(DEBUG) && defined(__DEVICE_EMULATION__)
if ((i == 0) && (bid == 0) && (tid <= 1)) {
printf("status[tid]:%08x\n", status[tid]);
}
#endif
o = temper(r, status[LARGE_SIZE - N + tid + pos - 1], bid);
#if defined(DEBUG) && defined(__DEVICE_EMULATION__)
if ((i == 0) && (bid == 0) && (tid <= 1)) {
printf("r:%08" PRIx32 "\n", r);
}
#endif
d_data[size * bid + i + tid] = o;
__syncthreads();
r = para_rec(status[(4 * THREAD_NUM - N + tid) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + 1) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + pos) % LARGE_SIZE],
bid);
status[tid + THREAD_NUM] = r;
o = temper(r,
status[(4 * THREAD_NUM - N + tid + pos - 1) % LARGE_SIZE],
bid);
d_data[size * bid + THREAD_NUM + i + tid] = o;
__syncthreads();
r = para_rec(status[2 * THREAD_NUM - N + tid],
status[2 * THREAD_NUM - N + tid + 1],
status[2 * THREAD_NUM - N + tid + pos],
bid);
status[tid + 2 * THREAD_NUM] = r;
o = temper(r, status[tid + pos - 1 + 2 * THREAD_NUM - N], bid);
d_data[size * bid + 2 * THREAD_NUM + i + tid] = o;
__syncthreads();
}
// write back status for next call
status_write(d_status, status, bid, tid);
}
/**
* kernel function.
* This function generates single precision floating point numbers in d_data.
*
* @param[in,out] d_status kernel I/O data
* @param[out] d_data output. IEEE single precision format.
* @param[in] size number of output data requested.
*/
__global__ void mtgp32_single_kernel(mtgp32_kernel_status_t* d_status,
uint32_t* d_data, int size)
{
const int bid = blockIdx.x;
const int tid = threadIdx.x;
int pos = pos_tbl[bid];
uint32_t r;
uint32_t o;
// copy status data from global memory to shared memory.
status_read(status, d_status, bid, tid);
// main loop
for (int i = 0; i < size; i += LARGE_SIZE) {
r = para_rec(status[LARGE_SIZE - N + tid],
status[LARGE_SIZE - N + tid + 1],
status[LARGE_SIZE - N + tid + pos],
bid);
status[tid] = r;
o = temper_single(r, status[LARGE_SIZE - N + tid + pos - 1], bid);
d_data[size * bid + i + tid] = o;
__syncthreads();
r = para_rec(status[(4 * THREAD_NUM - N + tid) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + 1) % LARGE_SIZE],
status[(4 * THREAD_NUM - N + tid + pos) % LARGE_SIZE],
bid);
status[tid + THREAD_NUM] = r;
o = temper_single(
r,
status[(4 * THREAD_NUM - N + tid + pos - 1) % LARGE_SIZE],
bid);
d_data[size * bid + THREAD_NUM + i + tid] = o;
__syncthreads();
r = para_rec(status[2 * THREAD_NUM - N + tid],
status[2 * THREAD_NUM - N + tid + 1],
status[2 * THREAD_NUM - N + tid + pos],
bid);
status[tid + 2 * THREAD_NUM] = r;
o = temper_single(r,
status[tid + pos - 1 + 2 * THREAD_NUM - N],
bid);
d_data[size * bid + 2 * THREAD_NUM + i + tid] = o;
__syncthreads();
}
// write back status for next call
status_write(d_status, status, bid, tid);
}
/**
* This function initializes kernel I/O data.
* @param d_status output kernel I/O data.
* @param params MTGP32 parameters. needed for the initialization.
*/
void make_kernel_data32(mtgp32_kernel_status_t * d_status,
mtgp32_params_fast_t params[],
int block_num)
{
int i;
mtgp32_kernel_status_t* h_status
= (mtgp32_kernel_status_t *) malloc(
sizeof(mtgp32_kernel_status_t) * block_num);
if (h_status == NULL) {
printf("failure in allocating host memory for kernel I/O data.\n");
exit(8);
}
for (i = 0; i < block_num; i++) {
mtgp32_init_state(&(h_status[i].status[0]), &params[i], i + 1);
}
#if defined(DEBUG)
printf("h_status[0].status[0]:%08"PRIx32"\n", h_status[0].status[0]);
printf("h_status[0].status[1]:%08"PRIx32"\n", h_status[0].status[1]);
printf("h_status[0].status[2]:%08"PRIx32"\n", h_status[0].status[2]);
printf("h_status[0].status[3]:%08"PRIx32"\n", h_status[0].status[3]);
#endif
ccudaMemcpy(d_status, h_status,
sizeof(mtgp32_kernel_status_t) * block_num,
cudaMemcpyHostToDevice);
free(h_status);
}
/**
* This function sets constants in device memory.
* @param[in] params input, MTGP32 parameters.
*/
void make_constant(const mtgp32_params_fast_t params[],
int block_num) {
const int size1 = sizeof(uint32_t) * block_num;
const int size2 = sizeof(uint32_t) * block_num * TBL_SIZE;
uint32_t *h_pos_tbl;
uint32_t *h_sh1_tbl;
uint32_t *h_sh2_tbl;
uint32_t *h_param_tbl;
uint32_t *h_temper_tbl;
uint32_t *h_single_temper_tbl;
uint32_t *h_mask;
h_pos_tbl = (uint32_t *)malloc(size1);
h_sh1_tbl = (uint32_t *)malloc(size1);
h_sh2_tbl = (uint32_t *)malloc(size1);
h_param_tbl = (uint32_t *)malloc(size2);
h_temper_tbl = (uint32_t *)malloc(size2);
h_single_temper_tbl = (uint32_t *)malloc(size2);
h_mask = (uint32_t *)malloc(sizeof(uint32_t));
if (h_pos_tbl == NULL
|| h_sh1_tbl == NULL
|| h_sh2_tbl == NULL
|| h_param_tbl == NULL
|| h_temper_tbl == NULL
|| h_single_temper_tbl == NULL
|| h_mask == NULL
) {
printf("failure in allocating host memory for constant table.\n");
exit(1);
}
h_mask[0] = params[0].mask;
for (int i = 0; i < block_num; i++) {
h_pos_tbl[i] = params[i].pos;
h_sh1_tbl[i] = params[i].sh1;
h_sh2_tbl[i] = params[i].sh2;
for (int j = 0; j < TBL_SIZE; j++) {
h_param_tbl[i * TBL_SIZE + j] = params[i].tbl[j];
h_temper_tbl[i * TBL_SIZE + j] = params[i].tmp_tbl[j];
h_single_temper_tbl[i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j];
}
}
ccudaMemcpyToSymbol(pos_tbl, h_pos_tbl, size1);
ccudaMemcpyToSymbol(sh1_tbl, h_sh1_tbl, size1);
ccudaMemcpyToSymbol(sh2_tbl, h_sh2_tbl, size1);
ccudaMemcpyToSymbol(param_tbl, h_param_tbl, size2);
ccudaMemcpyToSymbol(temper_tbl, h_temper_tbl, size2);
ccudaMemcpyToSymbol(single_temper_tbl, h_single_temper_tbl, size2);
ccudaMemcpyToSymbol(mask, h_mask, sizeof(uint32_t));
free(h_pos_tbl);
free(h_sh1_tbl);
free(h_sh2_tbl);
free(h_param_tbl);
free(h_temper_tbl);
free(h_single_temper_tbl);
free(h_mask);
}
/**
* host function.
* This function calls corresponding kernel function.
*
* @param[in] d_status kernel I/O data.
* @param[in] num_data number of data to be generated.
*/
void make_uint32_random(mtgp32_kernel_status_t* d_status,
int num_data,
int block_num) {
uint32_t* d_data;
uint32_t* h_data;
cudaError_t e;
float gputime;
cudaEvent_t start;
cudaEvent_t end;
printf("generating 32-bit unsigned random numbers.\n");
ccudaMalloc((void**)&d_data, sizeof(uint32_t) * num_data);
/* cutCreateTimer(&timer); */
ccudaEventCreate(&start);
ccudaEventCreate(&end);
h_data = (uint32_t *) malloc(sizeof(uint32_t) * num_data);
if (h_data == NULL) {
printf("failure in allocating host memory for output data.\n");
exit(1);
}
/* cutStartTimer(timer); */
ccudaEventRecord(start, 0);
if (cudaGetLastError() != cudaSuccess) {
printf("error has been occured before kernel call.\n");
exit(1);
}
/* kernel call */
mtgp32_uint32_kernel<<< block_num, THREAD_NUM>>>(
d_status, d_data, num_data / block_num);
cudaThreadSynchronize();
e = cudaGetLastError();
if (e != cudaSuccess) {
printf("failure in kernel call.\n%s\n", cudaGetErrorString(e));
exit(1);
}
/* ccutStopTimer(timer); */
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaMemcpy(h_data, d_data,
sizeof(uint32_t) * num_data, cudaMemcpyDeviceToHost);
/* gputime = cutGetTimerValue(timer); */
ccudaEventElapsedTime(&gputime, start, end);
print_uint32_array(h_data, num_data, block_num);
printf("generated numbers: %d\n", num_data);
printf("Processing time: %f (ms)\n", gputime);
printf("Samples per second: %E \n", num_data / (gputime * 0.001));
/* ccutDeleteTimer(timer); */
ccudaEventDestroy(start);
ccudaEventDestroy(end);
//free memories
free(h_data);
ccudaFree(d_data);
}
/**
* host function.
* This function calls corresponding kernel function.
*
* @param[in] d_status kernel I/O data.
* @param[in] num_data number of data to be generated.
*/
void make_single_random(mtgp32_kernel_status_t* d_status,
int num_data,
int block_num) {
uint32_t* d_data;
float* h_data;
cudaError_t e;
float gputime;
cudaEvent_t start;
cudaEvent_t end;
printf("generating single precision floating point random numbers.\n");
ccudaMalloc((void**)&d_data, sizeof(uint32_t) * num_data);
/* ccutCreateTimer(&timer); */
ccudaEventCreate(&start);
ccudaEventCreate(&end);
h_data = (float *) malloc(sizeof(float) * num_data);
if (h_data == NULL) {
printf("failure in allocating host memory for output data.\n");
exit(1);
}
/* ccutStartTimer(timer); */
ccudaEventRecord(start, 0);
if (cudaGetLastError() != cudaSuccess) {
printf("error has been occured before kernel call.\n");
exit(1);
}
/* kernel call */
mtgp32_single_kernel<<< block_num, THREAD_NUM >>>(
d_status, d_data, num_data / block_num);
cudaThreadSynchronize();
e = cudaGetLastError();
if (e != cudaSuccess) {
printf("failure in kernel call.\n%s\n", cudaGetErrorString(e));
exit(1);
}
/* ccutStopTimer(timer); */
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaMemcpy(h_data, d_data, sizeof(uint32_t) * num_data,
cudaMemcpyDeviceToHost);
/* gputime = cutGetTimerValue(timer); */
ccudaEventElapsedTime(&gputime, start, end);
print_float_array(h_data, num_data, block_num);
printf("generated numbers: %d\n", num_data);
printf("Processing time: %f (ms)\n", gputime);
printf("Samples per second: %E \n", num_data / (gputime * 0.001));
/* ccutDeleteTimer(timer); */
ccudaEventDestroy(start);
ccudaEventDestroy(end);
//free memories
free(h_data);
ccudaFree(d_data);
}
int main(int argc, char** argv)
{
// LARGE_SIZE is a multiple of 16
int num_data = 10000000;
int block_num;
int num_unit;
int r;
int device = 0;
mtgp32_kernel_status_t *d_status;
int mb, mp;
ccudaSetDevice(device);
if (argc >= 2) {
errno = 0;
block_num = strtol(argv[1], NULL, 10);
if (errno) {
printf("%s number_of_block number_of_output\n", argv[0]);
return 1;
}
if (block_num < 1 || block_num > BLOCK_NUM_MAX) {
printf("%s block_num should be between 1 and %d\n",
argv[0], BLOCK_NUM_MAX);
return 1;
}
errno = 0;
num_data = strtol(argv[2], NULL, 10);
if (errno) {
printf("%s number_of_block number_of_output\n", argv[0]);
return 1;
}
argc -= 2;
argv += 2;
} else {
printf("%s number_of_block number_of_output\n", argv[0]);
block_num = get_suitable_block_num(device,
&mb,
&mp,
sizeof(uint32_t),
THREAD_NUM,
LARGE_SIZE);
if (block_num <= 0) {
printf("can't calculate sutable number of blocks.\n");
return 1;
}
printf("the suitable number of blocks for device 0 "
"will be multiple of %d, or multiple of %d\n", block_num,
(mb - 1) * mp);
return 1;
}
num_unit = LARGE_SIZE * block_num;
ccudaMalloc((void**)&d_status,
sizeof(mtgp32_kernel_status_t) * block_num);
r = num_data % num_unit;
if (r != 0) {
num_data = num_data + num_unit - r;
}
make_constant(MTGPDC_PARAM_TABLE, block_num);
make_kernel_data32(d_status, MTGPDC_PARAM_TABLE, block_num);
make_uint32_random(d_status, num_data, block_num);
make_single_random(d_status, num_data, block_num);
ccudaFree(d_status);
}
|
7010d876fe65a3236dc5cf0891d93ee437dc8264.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MaskedVeloClustering.cuh"
// Mask for any one pixel array element's next iteration
__device__ uint32_t current_mask(uint32_t p) {
return ((p&VeloClustering::mask_top) << 1)
| ((p&VeloClustering::mask_bottom) >> 1)
| ((p&VeloClustering::mask_bottom_right) << 15)
| ((p&VeloClustering::mask_top_left) >> 15)
| (p >> 16)
| (p >> 17)
| (p << 16)
| (p << 17);
}
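// Pixel-array layout used by all masks in this file (see the candidate decoding in the
// kernel below): each uint32_t covers one super-pixel column, with bits 0-15 holding its
// left pixel column bottom-to-top and bits 16-31 its right pixel column. Shifts by 1 move
// vertically, shifts by 15/16/17 cross into the neighbouring pixel column, and the
// VeloClustering::mask_* constants stop bits from wrapping across column boundaries.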
// Mask from a pixel array element on the left
// to be applied on the pixel array element on the right
__device__ uint32_t mask_from_left_to_right(uint32_t p) {
return ((p&VeloClustering::mask_ltr_top_right) >> 15)
| (p >> 16)
| (p >> 17);
}
// Mask from a pixel array element on the right
// to be applied on the pixel array element on the left
__device__ uint32_t mask_from_right_to_left(uint32_t p) {
return ((p&VeloClustering::mask_rtl_bottom_left) << 15)
| (p << 16)
| (p << 17);
}
// Create mask for found clusters
// o o
// x o
// o
__device__ uint32_t cluster_current_mask(uint32_t p) {
return ((p&VeloClustering::mask_top) << 1)
| ((p&VeloClustering::mask_bottom_right) << 15)
| (p << 16)
| (p << 17);
}
// Require the four pixels of the pattern in order to
// get the candidates
__device__ uint32_t candidates_current_mask(uint32_t p) {
return ((p&VeloClustering::mask_bottom) >> 1)
& ((p&VeloClustering::mask_top_left) >> 15)
& (p >> 16)
& (p >> 17);
}
__device__ uint32_t candidates_current_mask_with_right_clusters(
uint32_t p,
uint32_t rp
) {
return ((p&VeloClustering::mask_bottom) >> 1)
& (((p&VeloClustering::mask_top_left) >> 15) | (rp << 17))
& ((p >> 16) | (rp << 16))
& ((p >> 17) | ((rp&VeloClustering::mask_rtl_bottom_left) << 15));
}
__global__ void masked_velo_clustering(
char* dev_raw_input,
uint* dev_raw_input_offsets,
uint* dev_module_cluster_start,
uint* dev_module_cluster_num,
uint* dev_event_candidate_num,
uint* dev_cluster_candidates,
uint32_t* dev_velo_cluster_container,
char* dev_velo_geometry
) {
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
const char* raw_input = dev_raw_input + dev_raw_input_offsets[event_number];
const uint* module_cluster_start = dev_module_cluster_start + event_number * VeloTracking::n_modules;
uint* module_cluster_num = dev_module_cluster_num + event_number * VeloTracking::n_modules;
uint number_of_candidates = dev_event_candidate_num[event_number];
uint32_t* cluster_candidates = (uint32_t*) &dev_cluster_candidates[event_number * VeloClustering::max_candidates_event];
// Local pointers to dev_velo_cluster_container
const uint estimated_number_of_clusters = dev_module_cluster_start[VeloTracking::n_modules * number_of_events];
float* cluster_xs = (float*) &dev_velo_cluster_container[0];
float* cluster_ys = (float*) &dev_velo_cluster_container[estimated_number_of_clusters];
float* cluster_zs = (float*) &dev_velo_cluster_container[2 * estimated_number_of_clusters];
uint32_t* cluster_ids = (uint32_t*) &dev_velo_cluster_container[3 * estimated_number_of_clusters];
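// dev_velo_cluster_container is a single flat buffer used as a structure-of-arrays:
// [x | y | z | LHCb id], each slice estimated_number_of_clusters entries long.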
// Load Velo geometry (assume it is the same for all events)
const VeloGeometry g (dev_velo_geometry);
// Read raw event
const auto raw_event = VeloRawEvent(raw_input);
// process no neighbour sp
for (int i=0; i<(raw_event.number_of_raw_banks + blockDim.x - 1) / blockDim.x; ++i) {
const auto raw_bank_number = i*blockDim.x + threadIdx.x;
if (raw_bank_number < raw_event.number_of_raw_banks) {
const auto module_number = raw_bank_number >> 2;
const uint cluster_start = module_cluster_start[module_number];
// Read raw bank
const auto raw_bank = VeloRawBank(raw_event.payload + raw_event.raw_bank_offset[raw_bank_number]);
const float* ltg = g.ltg + 16 * raw_bank.sensor_index;
for (int sp_index=0; sp_index<raw_bank.sp_count; ++sp_index) {
// Decode sp
const uint32_t sp_word = raw_bank.sp_word[sp_index];
const uint32_t sp_addr = (sp_word & 0x007FFF00U) >> 8;
const uint32_t no_sp_neighbours = sp_word & 0x80000000U;
// There are no neighbours, so compute the number of pixels of this superpixel
if (no_sp_neighbours) {
// Look up pre-generated patterns
const int32_t sp_row = sp_addr & 0x3FU;
const int32_t sp_col = (sp_addr >> 6);
const uint8_t sp = sp_word & 0xFFU;
const uint32_t idx = VeloClustering::sp_patterns[sp];
const uint32_t chip = sp_col / (VP::ChipColumns / 2);
{
// there is always at least one cluster in the super
// pixel. look up the pattern and add it.
const uint32_t row = idx & 0x03U;
const uint32_t col = (idx >> 2) & 1;
const uint32_t cx = sp_col * 2 + col;
const uint32_t cy = sp_row * 4 + row;
const uint cid = get_channel_id(raw_bank.sensor_index, chip, cx % VP::ChipColumns, cy);
const float fx = VeloClustering::sp_fx[sp * 2];
const float fy = VeloClustering::sp_fy[sp * 2];
const float local_x = g.local_x[cx] + fx * g.x_pitch[cx];
const float local_y = (cy + 0.5 + fy) * g.pixel_size;
const uint cluster_num = atomicAdd(module_cluster_num + module_number, 1);
const float gx = ltg[0] * local_x + ltg[1] * local_y + ltg[9];
const float gy = ltg[3] * local_x + ltg[4] * local_y + ltg[10];
const float gz = ltg[6] * local_x + ltg[7] * local_y + ltg[11];
cluster_xs[cluster_start + cluster_num] = gx;
cluster_ys[cluster_start + cluster_num] = gy;
cluster_zs[cluster_start + cluster_num] = gz;
cluster_ids[cluster_start + cluster_num] = get_lhcb_id(cid);
}
// if there is a second cluster for this pattern
// add it as well.
if (idx&8) {
const uint32_t row = (idx >> 4) & 3;
const uint32_t col = (idx >> 6) & 1;
const uint32_t cx = sp_col * 2 + col;
const uint32_t cy = sp_row * 4 + row;
uint cid = get_channel_id(raw_bank.sensor_index, chip, cx % VP::ChipColumns, cy);
const float fx = VeloClustering::sp_fx[sp * 2 + 1];
const float fy = VeloClustering::sp_fy[sp * 2 + 1];
const float local_x = g.local_x[cx] + fx * g.x_pitch[cx];
const float local_y = (cy + 0.5 + fy) * g.pixel_size;
const uint cluster_num = atomicAdd(module_cluster_num + module_number, 1);
const float gx = ltg[0] * local_x + ltg[1] * local_y + ltg[9];
const float gy = ltg[3] * local_x + ltg[4] * local_y + ltg[10];
const float gz = ltg[6] * local_x + ltg[7] * local_y + ltg[11];
cluster_xs[cluster_start + cluster_num] = gx;
cluster_ys[cluster_start + cluster_num] = gy;
cluster_zs[cluster_start + cluster_num] = gz;
cluster_ids[cluster_start + cluster_num] = get_lhcb_id(cid);
}
}
}
}
}
__syncthreads();
// Process rest of clusters
for (int i=0; i<(number_of_candidates + blockDim.x - 1) / blockDim.x; ++i) {
const auto candidate_number = i*blockDim.x + threadIdx.x;
if (candidate_number < number_of_candidates) {
const uint32_t candidate = cluster_candidates[candidate_number];
const uint8_t sp_index = candidate >> 11;
const uint8_t raw_bank_number = (candidate >> 3) & 0xFF;
const uint32_t module_number = raw_bank_number >> 2;
const uint8_t candidate_k = candidate & 0x7;
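// Candidate word layout implied by the decode above: bits [2:0] = pixel index inside
// the super pixel, bits [10:3] = raw bank number, remaining high bits = super-pixel index.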
assert(raw_bank_number < VeloTracking::n_sensors);
const auto raw_bank = VeloRawBank(raw_event.payload + raw_event.raw_bank_offset[raw_bank_number]);
const float* ltg = g.ltg + 16 * raw_bank.sensor_index;
const uint32_t sp_word = raw_bank.sp_word[sp_index];
const uint32_t sp_addr = (sp_word & 0x007FFF00U) >> 8;
// Note: In the code below, row and col are int32_t (not unsigned)
// This is not a bug
const int32_t sp_row = sp_addr & 0x3FU;
const int32_t sp_col = sp_addr >> 6;
// Find candidates that follow this condition:
// For pixel x, all pixels o should *not* be populated
// o o
// x o
// o
// Load the following SPs,
// where x is the SP containing the possible candidates, o are other SPs:
// oooo
// oxoo
// oooo
// oooo
//
// Each column of SPs are in one uint32_t
// Order is from left to right
//
// 0: o 1: o 2: o 3: o
// o x o o
// o o o o
// o o o o
//
// Order inside an uint32_t is from bottom to top. Eg. 1:
// 3: o
// 2: x
// 1: o
// 0: o
uint32_t pixel_array [3] = {0, 0, 0};
// sp limits to load
const int32_t sp_row_lower_limit = sp_row - 2;
const int32_t sp_row_upper_limit = sp_row + 1;
const int32_t sp_col_lower_limit = sp_col - 1;
const int32_t sp_col_upper_limit = sp_col + 1;
// Row limits
const int32_t row_lower_limit = sp_row_lower_limit * 4;
const int32_t col_lower_limit = sp_col_lower_limit * 2;
// Load SPs
// Note: We will pick up the current one,
// no need to add a special case
for (uint k=0; k<raw_bank.sp_count; ++k) {
const uint32_t other_sp_word = raw_bank.sp_word[k];
const uint32_t other_no_sp_neighbours = other_sp_word & 0x80000000U;
if (!other_no_sp_neighbours) {
const uint32_t other_sp_addr = (other_sp_word & 0x007FFF00U) >> 8;
const int32_t other_sp_row = other_sp_addr & 0x3FU;
const int32_t other_sp_col = (other_sp_addr >> 6);
const uint8_t other_sp = other_sp_word & 0xFFU;
if (other_sp_row >= sp_row_lower_limit
&& other_sp_row <= sp_row_upper_limit
&& other_sp_col >= sp_col_lower_limit
&& other_sp_col <= sp_col_upper_limit
) {
const int relative_row = other_sp_row - sp_row_lower_limit;
const int relative_col = other_sp_col - sp_col_lower_limit;
// Note: Order is:
// 15 31
// 14 30
// 13 29
// 12 28
// 11 27
// 10 26
// 9 25
// 8 24
// 7 23
// 6 22
// 5 21
// 4 20
// 3 19
// 2 18
// 1 17
// 0 16
pixel_array[relative_col] |= (other_sp&0X0F) << (4*relative_row)
| (other_sp&0XF0) << (12 + 4*relative_row);
}
}
}
// Work with candidate k
const uint32_t row = sp_row * 4 + (candidate_k % 4);
const uint32_t col = sp_col * 2 + (candidate_k >= 4);
// Cluster
// This will contain our building cluster
// Start it with row, col element active
uint32_t cluster [3] = {0, (uint32_t) ((0x01 << (row - row_lower_limit)) << (16 * (col % 2))), 0};
// Current cluster being considered for generating the mask
uint32_t working_cluster [3] = {0, cluster[1], 0};
// Delete pixels in cluster from pixels
pixel_array[1] &= ~cluster[1];
// Perform actual clustering
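// This is effectively a bounded flood fill: each iteration dilates the working cluster by
// one pixel in every direction, keeps only pixels still active in pixel_array, and stops
// when no new pixel is picked up (or after max_clustering_iterations passes).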
for (int clustering_iterations=0; clustering_iterations<VeloClustering::max_clustering_iterations; ++clustering_iterations) {
// Create mask for working cluster
uint32_t pixel_mask [3];
pixel_mask[0] = current_mask(working_cluster[0])
| mask_from_right_to_left(working_cluster[1]);
pixel_mask[1] = current_mask(working_cluster[1])
| mask_from_right_to_left(working_cluster[2])
| mask_from_left_to_right(working_cluster[0]);
pixel_mask[2] = current_mask(working_cluster[2])
| mask_from_left_to_right(working_cluster[1]);
// Calculate new elements
working_cluster[0] = pixel_array[0] & pixel_mask[0];
working_cluster[1] = pixel_array[1] & pixel_mask[1];
working_cluster[2] = pixel_array[2] & pixel_mask[2];
if (working_cluster[0]==0 && working_cluster[1]==0 && working_cluster[2]==0) {
break;
}
// Add new elements to cluster
cluster[0] |= working_cluster[0];
cluster[1] |= working_cluster[1];
cluster[2] |= working_cluster[2];
// Delete elements from pixel array
pixel_array[0] &= ~cluster[0];
pixel_array[1] &= ~cluster[1];
pixel_array[2] &= ~cluster[2];
}
// Early break: If there are any pixels
// active in SPs to the right, then
// there must be another pixel eventually
// fulfilling the condition
if (cluster[2]) {
continue;
}
// Calculate x and y from our formed cluster
// number of active pixels in the cluster
const int n = __popc(cluster[0])
+ __popc(cluster[1]);
// Prune repeated clusters
// Only check for repeated clusters for clusters with at least 3 elements
bool do_store = true;
if (n >= 3) {
// Apply mask for found clusters
// o o
// x o
// o
uint32_t pixel_mask [4];
pixel_mask[0] = cluster_current_mask(cluster[0]);
pixel_mask[1] = cluster_current_mask(cluster[1])
| mask_from_left_to_right(cluster[0]);
pixel_mask[2] = mask_from_left_to_right(cluster[1]);
// Do "and not" with found clusters
// This should return patterns like these:
// x x
// x
// x
working_cluster[0] = pixel_mask[0] & (~cluster[0]);
working_cluster[1] = pixel_mask[1] & (~cluster[1]);
working_cluster[2] = pixel_mask[2];
// Require the four pixels of the pattern in order to
// get the candidates
uint32_t candidates [2];
candidates[0] = candidates_current_mask_with_right_clusters(working_cluster[0], working_cluster[1]);
candidates[1] = candidates_current_mask_with_right_clusters(working_cluster[1], working_cluster[2]);
// candidates = candidates "and" clusters, to get the real candidates
candidates[0] &= cluster[0];
candidates[1] &= cluster[1];
// Remove our cluster candidate
const uint32_t working_candidate = (0x01 << (row - row_lower_limit)) << (16 * (col % 2));
candidates[1] ^= working_candidate;
// Check if there is another candidate with precedence
if (candidates[0] || candidates[1]) {
// Precedence:
// The current candidate should not be considered if there is another candidate
// with a smaller row, or a bigger column
//
// In order to calculate the last part, we can use the following trick:
// In two's complement:
// 32: 00100000
// -32: 11100000
// ~(-32): 00011111 (the mask we want)
const int32_t negative_working_candidate_mask = ~(-working_candidate);
const bool working_candidate_under_threshold = working_candidate<4096;
// Smaller row on candidates[1]
uint32_t smaller_row_pixel_mask = working_candidate_under_threshold * (0xFFF&negative_working_candidate_mask)
| (!working_candidate_under_threshold) * (0xFFF&(negative_working_candidate_mask>>16));
smaller_row_pixel_mask |= smaller_row_pixel_mask << 16;
// In order to do the current pixel mask, add the eventual bigger column
// ie: (add the second column)
// oo
// xo
// oo
// oo
const uint32_t current_pixel_mask = smaller_row_pixel_mask
| working_candidate_under_threshold * 0xFFFF0000;
// Compute do_store
do_store = ((candidates[0]&smaller_row_pixel_mask)
| (candidates[1]&current_pixel_mask)) == 0;
}
}
if (do_store) {
// Added value of all x
const int x = __popc(cluster[0]&0x0000FFFF)*col_lower_limit
+ __popc(cluster[0]&0xFFFF0000)*(col_lower_limit+1)
+ __popc(cluster[1]&0x0000FFFF)*(col_lower_limit+2)
+ __popc(cluster[1]&0xFFFF0000)*(col_lower_limit+3);
// Transpose momentarily clusters to obtain y in an easier way
const uint32_t transposed_clusters [4] = {
( cluster[0]&0x000F000F) | ((cluster[1]&0x000F000F) << 4),
((cluster[0]&0x00F000F0) >> 4) | ( cluster[1]&0x00F000F0) ,
((cluster[0]&0x0F000F00) >> 8) | ((cluster[1]&0x0F000F00) >> 4),
((cluster[0]&0xF000F000) >> 12) | ((cluster[1]&0xF000F000) >> 8)
};
// Added value of all y
const int y = __popc(transposed_clusters[0]&0x11111111)*row_lower_limit
+ __popc(transposed_clusters[0]&0x22222222)*(row_lower_limit+1)
+ __popc(transposed_clusters[0]&0x44444444)*(row_lower_limit+2)
+ __popc(transposed_clusters[0]&0x88888888)*(row_lower_limit+3)
+ __popc(transposed_clusters[1]&0x11111111)*(row_lower_limit+4)
+ __popc(transposed_clusters[1]&0x22222222)*(row_lower_limit+5)
+ __popc(transposed_clusters[1]&0x44444444)*(row_lower_limit+6)
+ __popc(transposed_clusters[1]&0x88888888)*(row_lower_limit+7)
+ __popc(transposed_clusters[2]&0x11111111)*(row_lower_limit+8)
+ __popc(transposed_clusters[2]&0x22222222)*(row_lower_limit+9)
+ __popc(transposed_clusters[2]&0x44444444)*(row_lower_limit+10)
+ __popc(transposed_clusters[2]&0x88888888)*(row_lower_limit+11)
+ __popc(transposed_clusters[3]&0x11111111)*(row_lower_limit+12)
+ __popc(transposed_clusters[3]&0x22222222)*(row_lower_limit+13)
+ __popc(transposed_clusters[3]&0x44444444)*(row_lower_limit+14)
+ __popc(transposed_clusters[3]&0x88888888)*(row_lower_limit+15);
const uint cx = x / n;
const uint cy = y / n;
const float fx = x / static_cast<float>(n) - cx;
const float fy = y / static_cast<float>(n) - cy;
// store target (3D point for tracking)
const uint32_t chip = cx / VP::ChipColumns;
uint cid = get_channel_id(raw_bank.sensor_index, chip, cx % VP::ChipColumns, cy);
const float local_x = g.local_x[cx] + fx * g.x_pitch[cx];
const float local_y = (cy + 0.5 + fy) * g.pixel_size;
const uint cluster_num = atomicAdd(module_cluster_num + module_number, 1);
const float gx = ltg[0] * local_x + ltg[1] * local_y + ltg[9];
const float gy = ltg[3] * local_x + ltg[4] * local_y + ltg[10];
const float gz = ltg[6] * local_x + ltg[7] * local_y + ltg[11];
const uint cluster_start = module_cluster_start[module_number];
const auto lhcb_id = get_lhcb_id(cid);
assert((cluster_start + cluster_num) < estimated_number_of_clusters);
cluster_xs[cluster_start + cluster_num] = gx;
cluster_ys[cluster_start + cluster_num] = gy;
cluster_zs[cluster_start + cluster_num] = gz;
cluster_ids[cluster_start + cluster_num] = lhcb_id;
}
}
}
}
| 7010d876fe65a3236dc5cf0891d93ee437dc8264.cu | #include "MaskedVeloClustering.cuh"
// Mask for any one pixel array element's next iteration
__device__ uint32_t current_mask(uint32_t p) {
return ((p&VeloClustering::mask_top) << 1)
| ((p&VeloClustering::mask_bottom) >> 1)
| ((p&VeloClustering::mask_bottom_right) << 15)
| ((p&VeloClustering::mask_top_left) >> 15)
| (p >> 16)
| (p >> 17)
| (p << 16)
| (p << 17);
}
// Mask from a pixel array element on the left
// to be applied on the pixel array element on the right
__device__ uint32_t mask_from_left_to_right(uint32_t p) {
return ((p&VeloClustering::mask_ltr_top_right) >> 15)
| (p >> 16)
| (p >> 17);
}
// Mask from a pixel array element on the right
// to be applied on the pixel array element on the left
__device__ uint32_t mask_from_right_to_left(uint32_t p) {
return ((p&VeloClustering::mask_rtl_bottom_left) << 15)
| (p << 16)
| (p << 17);
}
// Create mask for found clusters
// o o
// x o
// o
__device__ uint32_t cluster_current_mask(uint32_t p) {
return ((p&VeloClustering::mask_top) << 1)
| ((p&VeloClustering::mask_bottom_right) << 15)
| (p << 16)
| (p << 17);
}
// Require the four pixels of the pattern in order to
// get the candidates
__device__ uint32_t candidates_current_mask(uint32_t p) {
return ((p&VeloClustering::mask_bottom) >> 1)
& ((p&VeloClustering::mask_top_left) >> 15)
& (p >> 16)
& (p >> 17);
}
__device__ uint32_t candidates_current_mask_with_right_clusters(
uint32_t p,
uint32_t rp
) {
return ((p&VeloClustering::mask_bottom) >> 1)
& (((p&VeloClustering::mask_top_left) >> 15) | (rp << 17))
& ((p >> 16) | (rp << 16))
& ((p >> 17) | ((rp&VeloClustering::mask_rtl_bottom_left) << 15));
}
__global__ void masked_velo_clustering(
char* dev_raw_input,
uint* dev_raw_input_offsets,
uint* dev_module_cluster_start,
uint* dev_module_cluster_num,
uint* dev_event_candidate_num,
uint* dev_cluster_candidates,
uint32_t* dev_velo_cluster_container,
char* dev_velo_geometry
) {
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
const char* raw_input = dev_raw_input + dev_raw_input_offsets[event_number];
const uint* module_cluster_start = dev_module_cluster_start + event_number * VeloTracking::n_modules;
uint* module_cluster_num = dev_module_cluster_num + event_number * VeloTracking::n_modules;
uint number_of_candidates = dev_event_candidate_num[event_number];
uint32_t* cluster_candidates = (uint32_t*) &dev_cluster_candidates[event_number * VeloClustering::max_candidates_event];
// Local pointers to dev_velo_cluster_container
const uint estimated_number_of_clusters = dev_module_cluster_start[VeloTracking::n_modules * number_of_events];
float* cluster_xs = (float*) &dev_velo_cluster_container[0];
float* cluster_ys = (float*) &dev_velo_cluster_container[estimated_number_of_clusters];
float* cluster_zs = (float*) &dev_velo_cluster_container[2 * estimated_number_of_clusters];
uint32_t* cluster_ids = (uint32_t*) &dev_velo_cluster_container[3 * estimated_number_of_clusters];
// Load Velo geometry (assume it is the same for all events)
const VeloGeometry g (dev_velo_geometry);
// Read raw event
const auto raw_event = VeloRawEvent(raw_input);
// process no neighbour sp
for (int i=0; i<(raw_event.number_of_raw_banks + blockDim.x - 1) / blockDim.x; ++i) {
const auto raw_bank_number = i*blockDim.x + threadIdx.x;
if (raw_bank_number < raw_event.number_of_raw_banks) {
const auto module_number = raw_bank_number >> 2;
const uint cluster_start = module_cluster_start[module_number];
// Read raw bank
const auto raw_bank = VeloRawBank(raw_event.payload + raw_event.raw_bank_offset[raw_bank_number]);
const float* ltg = g.ltg + 16 * raw_bank.sensor_index;
for (int sp_index=0; sp_index<raw_bank.sp_count; ++sp_index) {
// Decode sp
const uint32_t sp_word = raw_bank.sp_word[sp_index];
const uint32_t sp_addr = (sp_word & 0x007FFF00U) >> 8;
const uint32_t no_sp_neighbours = sp_word & 0x80000000U;
// There are no neighbours, so compute the number of pixels of this superpixel
if (no_sp_neighbours) {
// Look up pre-generated patterns
const int32_t sp_row = sp_addr & 0x3FU;
const int32_t sp_col = (sp_addr >> 6);
const uint8_t sp = sp_word & 0xFFU;
const uint32_t idx = VeloClustering::sp_patterns[sp];
const uint32_t chip = sp_col / (VP::ChipColumns / 2);
{
// there is always at least one cluster in the super
// pixel. look up the pattern and add it.
const uint32_t row = idx & 0x03U;
const uint32_t col = (idx >> 2) & 1;
const uint32_t cx = sp_col * 2 + col;
const uint32_t cy = sp_row * 4 + row;
const uint cid = get_channel_id(raw_bank.sensor_index, chip, cx % VP::ChipColumns, cy);
const float fx = VeloClustering::sp_fx[sp * 2];
const float fy = VeloClustering::sp_fy[sp * 2];
const float local_x = g.local_x[cx] + fx * g.x_pitch[cx];
const float local_y = (cy + 0.5 + fy) * g.pixel_size;
const uint cluster_num = atomicAdd(module_cluster_num + module_number, 1);
const float gx = ltg[0] * local_x + ltg[1] * local_y + ltg[9];
const float gy = ltg[3] * local_x + ltg[4] * local_y + ltg[10];
const float gz = ltg[6] * local_x + ltg[7] * local_y + ltg[11];
cluster_xs[cluster_start + cluster_num] = gx;
cluster_ys[cluster_start + cluster_num] = gy;
cluster_zs[cluster_start + cluster_num] = gz;
cluster_ids[cluster_start + cluster_num] = get_lhcb_id(cid);
}
// if there is a second cluster for this pattern
// add it as well.
if (idx&8) {
const uint32_t row = (idx >> 4) & 3;
const uint32_t col = (idx >> 6) & 1;
const uint32_t cx = sp_col * 2 + col;
const uint32_t cy = sp_row * 4 + row;
uint cid = get_channel_id(raw_bank.sensor_index, chip, cx % VP::ChipColumns, cy);
const float fx = VeloClustering::sp_fx[sp * 2 + 1];
const float fy = VeloClustering::sp_fy[sp * 2 + 1];
const float local_x = g.local_x[cx] + fx * g.x_pitch[cx];
const float local_y = (cy + 0.5 + fy) * g.pixel_size;
const uint cluster_num = atomicAdd(module_cluster_num + module_number, 1);
const float gx = ltg[0] * local_x + ltg[1] * local_y + ltg[9];
const float gy = ltg[3] * local_x + ltg[4] * local_y + ltg[10];
const float gz = ltg[6] * local_x + ltg[7] * local_y + ltg[11];
cluster_xs[cluster_start + cluster_num] = gx;
cluster_ys[cluster_start + cluster_num] = gy;
cluster_zs[cluster_start + cluster_num] = gz;
cluster_ids[cluster_start + cluster_num] = get_lhcb_id(cid);
}
}
}
}
}
__syncthreads();
// Process rest of clusters
for (int i=0; i<(number_of_candidates + blockDim.x - 1) / blockDim.x; ++i) {
const auto candidate_number = i*blockDim.x + threadIdx.x;
if (candidate_number < number_of_candidates) {
const uint32_t candidate = cluster_candidates[candidate_number];
const uint8_t sp_index = candidate >> 11;
const uint8_t raw_bank_number = (candidate >> 3) & 0xFF;
const uint32_t module_number = raw_bank_number >> 2;
const uint8_t candidate_k = candidate & 0x7;
assert(raw_bank_number < VeloTracking::n_sensors);
const auto raw_bank = VeloRawBank(raw_event.payload + raw_event.raw_bank_offset[raw_bank_number]);
const float* ltg = g.ltg + 16 * raw_bank.sensor_index;
const uint32_t sp_word = raw_bank.sp_word[sp_index];
const uint32_t sp_addr = (sp_word & 0x007FFF00U) >> 8;
// Note: In the code below, row and col are int32_t (not unsigned)
// This is not a bug
const int32_t sp_row = sp_addr & 0x3FU;
const int32_t sp_col = sp_addr >> 6;
// Find candidates that follow this condition:
// For pixel x, all pixels o should *not* be populated
// o o
// x o
// o
// Load the following SPs,
// where x is the SP containing the possible candidates, o are other SPs:
// oooo
// oxoo
// oooo
// oooo
//
// Each column of SPs are in one uint32_t
// Order is from left to right
//
// 0: o 1: o 2: o 3: o
// o x o o
// o o o o
// o o o o
//
// Order inside an uint32_t is from bottom to top. Eg. 1:
// 3: o
// 2: x
// 1: o
// 0: o
uint32_t pixel_array [3] = {0, 0, 0};
// sp limits to load
const int32_t sp_row_lower_limit = sp_row - 2;
const int32_t sp_row_upper_limit = sp_row + 1;
const int32_t sp_col_lower_limit = sp_col - 1;
const int32_t sp_col_upper_limit = sp_col + 1;
// Row limits
const int32_t row_lower_limit = sp_row_lower_limit * 4;
const int32_t col_lower_limit = sp_col_lower_limit * 2;
// Load SPs
// Note: We will pick up the current one,
// no need to add a special case
for (uint k=0; k<raw_bank.sp_count; ++k) {
const uint32_t other_sp_word = raw_bank.sp_word[k];
const uint32_t other_no_sp_neighbours = other_sp_word & 0x80000000U;
if (!other_no_sp_neighbours) {
const uint32_t other_sp_addr = (other_sp_word & 0x007FFF00U) >> 8;
const int32_t other_sp_row = other_sp_addr & 0x3FU;
const int32_t other_sp_col = (other_sp_addr >> 6);
const uint8_t other_sp = other_sp_word & 0xFFU;
if (other_sp_row >= sp_row_lower_limit
&& other_sp_row <= sp_row_upper_limit
&& other_sp_col >= sp_col_lower_limit
&& other_sp_col <= sp_col_upper_limit
) {
const int relative_row = other_sp_row - sp_row_lower_limit;
const int relative_col = other_sp_col - sp_col_lower_limit;
// Note: Order is:
// 15 31
// 14 30
// 13 29
// 12 28
// 11 27
// 10 26
// 9 25
// 8 24
// 7 23
// 6 22
// 5 21
// 4 20
// 3 19
// 2 18
// 1 17
// 0 16
pixel_array[relative_col] |= (other_sp&0X0F) << (4*relative_row)
| (other_sp&0XF0) << (12 + 4*relative_row);
}
}
}
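// After this loop the window is addressed as follows: pixel (row, col)
// lives in pixel_array[(col - col_lower_limit) / 2], at bit
// (row - row_lower_limit) + 16 * (col % 2); col_lower_limit is even, so
// col % 2 is the pixel column within its super-pixel column. The candidate
// pixel below is encoded the same way.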
// Work with candidate k
const uint32_t row = sp_row * 4 + (candidate_k % 4);
const uint32_t col = sp_col * 2 + (candidate_k >= 4);
// Cluster
// This will contain our building cluster
// Start it with row, col element active
uint32_t cluster [3] = {0, (uint32_t) ((0x01 << (row - row_lower_limit)) << (16 * (col % 2))), 0};
// Current cluster being considered for generating the mask
uint32_t working_cluster [3] = {0, cluster[1], 0};
// Delete pixels in cluster from pixels
pixel_array[1] &= ~cluster[1];
// Perform actual clustering
for (int clustering_iterations=0; clustering_iterations<VeloClustering::max_clustering_iterations; ++clustering_iterations) {
// Create mask for working cluster
uint32_t pixel_mask [3];
pixel_mask[0] = current_mask(working_cluster[0])
| mask_from_right_to_left(working_cluster[1]);
pixel_mask[1] = current_mask(working_cluster[1])
| mask_from_right_to_left(working_cluster[2])
| mask_from_left_to_right(working_cluster[0]);
pixel_mask[2] = current_mask(working_cluster[2])
| mask_from_left_to_right(working_cluster[1]);
// Calculate new elements
working_cluster[0] = pixel_array[0] & pixel_mask[0];
working_cluster[1] = pixel_array[1] & pixel_mask[1];
working_cluster[2] = pixel_array[2] & pixel_mask[2];
if (working_cluster[0]==0 && working_cluster[1]==0 && working_cluster[2]==0) {
break;
}
// Add new elements to cluster
cluster[0] |= working_cluster[0];
cluster[1] |= working_cluster[1];
cluster[2] |= working_cluster[2];
// Delete elements from pixel array
pixel_array[0] &= ~cluster[0];
pixel_array[1] &= ~cluster[1];
pixel_array[2] &= ~cluster[2];
}
// Early break: If there are any pixels
// active in SPs to the right, then
// there must be another pixel eventually
// fulfilling the condition
if (cluster[2]) {
continue;
}
// Calculate x and y from our formed cluster
// number of active clusters
const int n = __popc(cluster[0])
+ __popc(cluster[1]);
// Prune repeated clusters
// Only check for repeated clusters for clusters with at least 3 elements
bool do_store = true;
if (n >= 3) {
// Apply mask for found clusters
// o o
// x o
// o
uint32_t pixel_mask [4];
pixel_mask[0] = cluster_current_mask(cluster[0]);
pixel_mask[1] = cluster_current_mask(cluster[1])
| mask_from_left_to_right(cluster[0]);
pixel_mask[2] = mask_from_left_to_right(cluster[1]);
// Do "and not" with found clusters
// This should return patterns like these:
// x x
// x
// x
working_cluster[0] = pixel_mask[0] & (~cluster[0]);
working_cluster[1] = pixel_mask[1] & (~cluster[1]);
working_cluster[2] = pixel_mask[2];
// Require the four pixels of the pattern in order to
// get the candidates
uint32_t candidates [2];
candidates[0] = candidates_current_mask_with_right_clusters(working_cluster[0], working_cluster[1]);
candidates[1] = candidates_current_mask_with_right_clusters(working_cluster[1], working_cluster[2]);
// candidates = candidates "and" clusters, to get the real candidates
candidates[0] &= cluster[0];
candidates[1] &= cluster[1];
// Remove our cluster candidate
const uint32_t working_candidate = (0x01 << (row - row_lower_limit)) << (16 * (col % 2));
candidates[1] ^= working_candidate;
// Check if there is another candidate with precedence
if (candidates[0] || candidates[1]) {
// Precedence:
// The current candidate should not be considered if there is another candidate
// with a smaller row, or a bigger column
//
// In order to calculate the last part, we can use the following trick:
// In two's complement:
// 32: 00100000
// -32: 11100000
// ~(-32): 00011111 (the mask we want)
const int32_t negative_working_candidate_mask = ~(-working_candidate);
const bool working_candidate_under_threshold = working_candidate<4096;
// Smaller row on candidates[1]
uint32_t smaller_row_pixel_mask = working_candidate_under_threshold * (0xFFF&negative_working_candidate_mask)
| (!working_candidate_under_threshold) * (0xFFF&(negative_working_candidate_mask>>16));
smaller_row_pixel_mask |= smaller_row_pixel_mask << 16;
// In order to do the current pixel mask, add the eventual bigger column
// ie: (add the second column)
// oo
// xo
// oo
// oo
const uint32_t current_pixel_mask = smaller_row_pixel_mask
| working_candidate_under_threshold * 0xFFFF0000;
// Compute do_store
do_store = ((candidates[0]&smaller_row_pixel_mask)
| (candidates[1]&current_pixel_mask)) == 0;
}
}
if (do_store) {
// Added value of all x
const int x = __popc(cluster[0]&0x0000FFFF)*col_lower_limit
+ __popc(cluster[0]&0xFFFF0000)*(col_lower_limit+1)
+ __popc(cluster[1]&0x0000FFFF)*(col_lower_limit+2)
+ __popc(cluster[1]&0xFFFF0000)*(col_lower_limit+3);
// Transpose momentarily clusters to obtain y in an easier way
const uint32_t transposed_clusters [4] = {
( cluster[0]&0x000F000F) | ((cluster[1]&0x000F000F) << 4),
((cluster[0]&0x00F000F0) >> 4) | ( cluster[1]&0x00F000F0) ,
((cluster[0]&0x0F000F00) >> 8) | ((cluster[1]&0x0F000F00) >> 4),
((cluster[0]&0xF000F000) >> 12) | ((cluster[1]&0xF000F000) >> 8)
};
// Added value of all y
const int y = __popc(transposed_clusters[0]&0x11111111)*row_lower_limit
+ __popc(transposed_clusters[0]&0x22222222)*(row_lower_limit+1)
+ __popc(transposed_clusters[0]&0x44444444)*(row_lower_limit+2)
+ __popc(transposed_clusters[0]&0x88888888)*(row_lower_limit+3)
+ __popc(transposed_clusters[1]&0x11111111)*(row_lower_limit+4)
+ __popc(transposed_clusters[1]&0x22222222)*(row_lower_limit+5)
+ __popc(transposed_clusters[1]&0x44444444)*(row_lower_limit+6)
+ __popc(transposed_clusters[1]&0x88888888)*(row_lower_limit+7)
+ __popc(transposed_clusters[2]&0x11111111)*(row_lower_limit+8)
+ __popc(transposed_clusters[2]&0x22222222)*(row_lower_limit+9)
+ __popc(transposed_clusters[2]&0x44444444)*(row_lower_limit+10)
+ __popc(transposed_clusters[2]&0x88888888)*(row_lower_limit+11)
+ __popc(transposed_clusters[3]&0x11111111)*(row_lower_limit+12)
+ __popc(transposed_clusters[3]&0x22222222)*(row_lower_limit+13)
+ __popc(transposed_clusters[3]&0x44444444)*(row_lower_limit+14)
+ __popc(transposed_clusters[3]&0x88888888)*(row_lower_limit+15);
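// x and y now hold the sums of the column and row indices of the n pixels
// in the cluster; the centre of gravity is their mean, split below into the
// integer pixel (cx, cy) and the fractional intra-pixel offset (fx, fy).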
const uint cx = x / n;
const uint cy = y / n;
const float fx = x / static_cast<float>(n) - cx;
const float fy = y / static_cast<float>(n) - cy;
// store target (3D point for tracking)
const uint32_t chip = cx / VP::ChipColumns;
uint cid = get_channel_id(raw_bank.sensor_index, chip, cx % VP::ChipColumns, cy);
const float local_x = g.local_x[cx] + fx * g.x_pitch[cx];
const float local_y = (cy + 0.5 + fy) * g.pixel_size;
const uint cluster_num = atomicAdd(module_cluster_num + module_number, 1);
const float gx = ltg[0] * local_x + ltg[1] * local_y + ltg[9];
const float gy = ltg[3] * local_x + ltg[4] * local_y + ltg[10];
const float gz = ltg[6] * local_x + ltg[7] * local_y + ltg[11];
const uint cluster_start = module_cluster_start[module_number];
const auto lhcb_id = get_lhcb_id(cid);
assert((cluster_start + cluster_num) < estimated_number_of_clusters);
cluster_xs[cluster_start + cluster_num] = gx;
cluster_ys[cluster_start + cluster_num] = gy;
cluster_zs[cluster_start + cluster_num] = gz;
cluster_ids[cluster_start + cluster_num] = lhcb_id;
}
}
}
}
|
59f06fe6d9a5ce350b526cbb6ed816c460e8a72f.hip | // !!! This is a file automatically generated by hipify!!!
#include <from_cudf/cpp_src/bitmask/legacy/bit_mask.cuh>
#include <cudf/legacy/table.hpp>
#include <from_cudf/cpp_src/bitmask/legacy/legacy_bitmask.hpp>
#include <cudf/cudf.h>
#include <cudf/legacy/functions.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <from_cudf/cpp_src/utilities/legacy/cudf_utils.h>
#include <cudf/utilities/error.hpp>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
#include <cassert>
#include <hipcub/hipcub.hpp>
#include <vector>
#include <algorithm>
// To account for if cudf::valid_type is not a 4 byte type,
// compute the RATIO of the number of bytes in cudf::valid_type
// to the 4 byte type being used for casting
using valid32_t = uint32_t;
constexpr size_t RATIO = sizeof(valid32_t) / sizeof(cudf::valid_type);
constexpr int BITS_PER_MASK32 = GDF_VALID_BITSIZE * RATIO;
constexpr int block_size = 256;
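// For the usual 1-byte cudf::valid_type with GDF_VALID_BITSIZE == 8 this
// gives RATIO == 4 and BITS_PER_MASK32 == 32, i.e. 32 rows per 4-byte word.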
namespace {
/**
* @brief Kernel to count the number of set bits in a column's validity buffer
*
* The underlying buffer type may only be a 1B type, but it is casted to a 4B
* type (valid32_t) such that __popc may be used to more efficiently count the
* number of set bits. This requires handling the last 4B element as a special
* case as the buffer may not be a multiple of 4 bytes.
*
* @param[in] masks32 Pointer to buffer (casted as a 4B type) whose bits will be
* counted
* @param[in] num_masks32 The number of 4B elements in the buffer
* @param[in] num_rows The number of rows in the column, i.e., the number of
* bits in the buffer that correspond to rows
* @param[out] global_count The number of set bits in the range of bits [0,
* num_rows)
*/
template <typename size_type>
__global__ void count_valid_bits(valid32_t const* const masks32,
int const num_masks32, int const num_rows,
size_type* const global_count) {
using BlockReduce = hipcub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// If the number of rows is not a multiple of 32, then the remaining
// rows need to be handled separately because not all of its bits correspond
// to rows
int last_mask32{0};
int const num_rows_last_mask{num_rows % BITS_PER_MASK32};
if (0 == num_rows_last_mask)
last_mask32 = num_masks32;
else
last_mask32 = num_masks32 - 1;
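// last_mask32 counts the 4-byte words whose bits all correspond to rows;
// a partial final word, if any, is handled bit by bit further down.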
int const idx{static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x)};
int cur_mask{idx};
size_type my_count{0};
// Use popc to count the valid bits for the all of the masks
// where all of the bits correspond to rows
while (cur_mask < last_mask32) {
my_count += __popc(masks32[cur_mask]);
cur_mask += blockDim.x * gridDim.x;
}
// Handle the remainder rows
if (idx < num_rows_last_mask) {
cudf::valid_type const* const valids{
reinterpret_cast<cudf::valid_type const*>(masks32)};
int const my_row{num_rows - idx - 1};
if (true == gdf_is_valid(valids, my_row)) ++my_count;
}
// Reduces the count from each thread in a block into a block count
int const block_count{BlockReduce(temp_storage).Sum(my_count)};
// Store the block count into the global count
if (threadIdx.x == 0) {
atomicAdd(global_count, block_count);
}
}
} // namespace
gdf_error gdf_count_nonzero_mask(cudf::valid_type const* masks,
cudf::size_type num_rows, cudf::size_type* count) {
// TODO: add a default parameter hipStream_t stream = 0 when we move API to
// C++
if ((nullptr == count)) {
return GDF_DATASET_EMPTY;
}
if (0 == num_rows) {
*count = 0;
return GDF_SUCCESS;
}
if (nullptr == masks) {
*count = num_rows;
return GDF_SUCCESS;
}
// Masks will be processed as 4B types, therefore we require that the
// underlying type be less than or equal to 4B
static_assert(sizeof(valid32_t) >= sizeof(cudf::valid_type),
"cudf::valid_type is assumed to be <= 4B type");
// Number of cudf::valid_types in the validity bitmask
cudf::size_type const num_masks{gdf_num_bitmask_elements(num_rows)};
// Number of 4 byte types in the validity bit mask
cudf::size_type num_masks32{static_cast<cudf::size_type>(
::ceil(static_cast<float>(num_masks) / RATIO))};
cudf::size_type h_count{0};
if (num_masks32 > 0) {
// TODO: Probably shouldn't create/destroy the stream every time
hipStream_t count_stream;
CUDA_TRY(hipStreamCreate(&count_stream));
int* d_count{nullptr};
// Cast validity buffer to 4 byte type
valid32_t const* masks32{reinterpret_cast<valid32_t const*>(masks)};
RMM_TRY(RMM_ALLOC((void**)&d_count, sizeof(cudf::size_type), count_stream));
CUDA_TRY(hipMemsetAsync(d_count, 0, sizeof(cudf::size_type), count_stream));
cudf::size_type const grid_size{(num_masks32 + block_size - 1) / block_size};
hipLaunchKernelGGL(( count_valid_bits), dim3(grid_size), dim3(block_size), 0, count_stream,
masks32, num_masks32, num_rows, d_count);
CUDA_TRY(hipGetLastError());
CUDA_TRY(hipMemcpyAsync(&h_count, d_count, sizeof(cudf::size_type),
hipMemcpyDeviceToHost, count_stream));
RMM_TRY(RMM_FREE(d_count, count_stream));
CUDA_TRY(hipStreamSynchronize(count_stream));
CUDA_TRY(hipStreamDestroy(count_stream));
}
assert(h_count >= 0);
assert(h_count <= num_rows);
*count = h_count;
return GDF_SUCCESS;
}
gdf_error gdf_mask_concat(cudf::valid_type* output_mask,
cudf::size_type output_column_length,
gdf_column *columns_to_concat[],
cudf::size_type num_columns) {
std::vector<cudf::valid_type*> h_masks(num_columns);
std::vector<cudf::size_type> h_column_lengths(num_columns);
std::transform(columns_to_concat, columns_to_concat + num_columns,
h_masks.begin(), [](auto col) { return col->valid; });
std::transform(columns_to_concat, columns_to_concat + num_columns,
h_column_lengths.begin(), [](auto col) { return col->size; });
rmm::device_vector<cudf::valid_type*> d_masks(h_masks);
rmm::device_vector<cudf::size_type> d_column_lengths(h_column_lengths);
cudf::valid_type** masks_to_concat = thrust::raw_pointer_cast(d_masks.data());
cudf::size_type* column_lengths = thrust::raw_pointer_cast(d_column_lengths.data());
// This lambda is executed in a thrust algorithm. Each thread computes and
// returns one cudf::valid_type element for the concatenated output mask
auto mask_concatenator = [=] __device__(cudf::size_type mask_index) {
cudf::valid_type output_m = 0;
int cur_mask_index = 0, cur_mask_start = 0;
int cur_mask_len = column_lengths[0];
// Each thread processes one GDF_VALID_BITSIZE worth of valid bits
for (int bit = 0; bit < GDF_VALID_BITSIZE; ++bit) {
cudf::size_type output_index = mask_index * GDF_VALID_BITSIZE + bit;
// stop when we are beyond the length of the output column (in elements)
if (output_index >= output_column_length) break;
// find the next column's mask when we step past the current column's
// length
while ((cur_mask_start + cur_mask_len <= output_index) &&
(cur_mask_index < num_columns - 1)) {
cur_mask_start += cur_mask_len;
cur_mask_len = column_lengths[++cur_mask_index];
}
// Set each valid bit at the right location in this thread's output
// cudf::valid_type Note: gdf_is_valid returns true when the input mask is a
// null pointer This makes it behave as if columns with null validity
// masks have masks of all 1s, which is the desired behavior.
cudf::size_type index = output_index - cur_mask_start;
if (gdf_is_valid(masks_to_concat[cur_mask_index], index)) {
output_m |= (1 << bit);
}
}
return output_m;
};
// This is like thrust::for_each where the lambda gets the current index into
// the output array as input
thrust::tabulate(rmm::exec_policy()->on(0), output_mask,
output_mask + gdf_num_bitmask_elements(output_column_length),
mask_concatenator);
CUDA_TRY(hipGetLastError());
return GDF_SUCCESS;
}
gdf_error all_bitmask_on(cudf::valid_type* valid_out,
cudf::size_type& out_null_count,
cudf::size_type num_values, hipStream_t stream) {
cudf::size_type num_bitmask_elements = gdf_num_bitmask_elements(num_values);
cudf::valid_type max_char = 255;
thrust::fill(rmm::exec_policy(stream)->on(stream), valid_out,
valid_out + num_bitmask_elements, max_char);
// we have no nulls so set all the bits in cudf::valid_type to 1
out_null_count = 0;
return GDF_SUCCESS;
}
gdf_error apply_bitmask_to_bitmask(cudf::size_type& out_null_count,
cudf::valid_type* valid_out,
const cudf::valid_type* valid_left,
const cudf::valid_type* valid_right,
hipStream_t stream,
cudf::size_type num_values) {
cudf::size_type num_bitmask_elements = gdf_num_bitmask_elements(num_values);
thrust::transform(rmm::exec_policy(stream)->on(stream), valid_left,
valid_left + num_bitmask_elements, valid_right, valid_out,
thrust::bit_and<cudf::valid_type>());
cudf::size_type non_nulls;
auto error = gdf_count_nonzero_mask(valid_out, num_values, &non_nulls);
out_null_count = num_values - non_nulls;
return error;
}
namespace cudf {
namespace {
/**
* @brief Computes a bitmask from the bitwise AND of a set of bitmasks.
*/
struct bitwise_and {
bitwise_and(bit_mask::bit_mask_t** _masks, cudf::size_type _num_masks)
: masks{_masks}, num_masks(_num_masks) {}
__device__ inline bit_mask::bit_mask_t operator()(
cudf::size_type mask_element_index) {
using namespace bit_mask;
bit_mask_t result_mask{~bit_mask_t{0}}; // all 1s
for (cudf::size_type i = 0; i < num_masks; ++i) {
result_mask &= masks[i][mask_element_index];
}
return result_mask;
}
cudf::size_type num_masks;
bit_mask::bit_mask_t** masks;
};
} // namespace
rmm::device_vector<bit_mask::bit_mask_t> row_bitmask(cudf::table const& table,
hipStream_t stream) {
using namespace bit_mask;
rmm::device_vector<bit_mask_t> row_bitmask(num_elements(table.num_rows()),
~bit_mask_t{0});
// Populate vector of pointers to the bitmasks of columns that contain
// NULL values
std::vector<bit_mask_t*> column_bitmasks{row_bitmask.data().get()};
std::for_each(
table.begin(), table.end(), [&column_bitmasks](gdf_column const* col) {
if ((nullptr != col->valid) and (col->null_count > 0)) {
column_bitmasks.push_back(reinterpret_cast<bit_mask_t*>(col->valid));
}
});
rmm::device_vector<bit_mask_t*> d_column_bitmasks{column_bitmasks};
// Compute bitwise AND of all key columns' bitmasks
thrust::tabulate(
rmm::exec_policy(stream)->on(stream), row_bitmask.begin(),
row_bitmask.end(),
bitwise_and(d_column_bitmasks.data().get(), d_column_bitmasks.size()));
return row_bitmask;
}
} // namespace cudf
| 59f06fe6d9a5ce350b526cbb6ed816c460e8a72f.cu | #include <from_cudf/cpp_src/bitmask/legacy/bit_mask.cuh>
#include <cudf/legacy/table.hpp>
#include <from_cudf/cpp_src/bitmask/legacy/legacy_bitmask.hpp>
#include <cudf/cudf.h>
#include <cudf/legacy/functions.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <from_cudf/cpp_src/utilities/legacy/cudf_utils.h>
#include <cudf/utilities/error.hpp>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
#include <cassert>
#include <cub/cub.cuh>
#include <vector>
#include <algorithm>
// To account for if cudf::valid_type is not a 4 byte type,
// compute the RATIO of the number of bytes in cudf::valid_type
// to the 4 byte type being used for casting
using valid32_t = uint32_t;
constexpr size_t RATIO = sizeof(valid32_t) / sizeof(cudf::valid_type);
constexpr int BITS_PER_MASK32 = GDF_VALID_BITSIZE * RATIO;
constexpr int block_size = 256;
namespace {
/**
* @brief Kernel to count the number of set bits in a column's validity buffer
*
* The underlying buffer type may only be a 1B type, but it is casted to a 4B
* type (valid32_t) such that __popc may be used to more efficiently count the
* number of set bits. This requires handling the last 4B element as a special
* case as the buffer may not be a multiple of 4 bytes.
*
* @param[in] masks32 Pointer to buffer (casted as a 4B type) whose bits will be
* counted
* @param[in] num_masks32 The number of 4B elements in the buffer
* @param[in] num_rows The number of rows in the column, i.e., the number of
* bits in the buffer that correspond to rows
* @param[out] global_count The number of set bits in the range of bits [0,
* num_rows)
*/
template <typename size_type>
__global__ void count_valid_bits(valid32_t const* const masks32,
int const num_masks32, int const num_rows,
size_type* const global_count) {
using BlockReduce = cub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// If the number of rows is not a multiple of 32, then the remaining
// rows need to be handled separately because not all of its bits correspond
// to rows
int last_mask32{0};
int const num_rows_last_mask{num_rows % BITS_PER_MASK32};
if (0 == num_rows_last_mask)
last_mask32 = num_masks32;
else
last_mask32 = num_masks32 - 1;
int const idx{static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x)};
int cur_mask{idx};
size_type my_count{0};
// Use popc to count the valid bits for the all of the masks
// where all of the bits correspond to rows
while (cur_mask < last_mask32) {
my_count += __popc(masks32[cur_mask]);
cur_mask += blockDim.x * gridDim.x;
}
// Handle the remainder rows
if (idx < num_rows_last_mask) {
cudf::valid_type const* const valids{
reinterpret_cast<cudf::valid_type const*>(masks32)};
int const my_row{num_rows - idx - 1};
if (true == gdf_is_valid(valids, my_row)) ++my_count;
}
// Reduces the count from each thread in a block into a block count
int const block_count{BlockReduce(temp_storage).Sum(my_count)};
// Store the block count into the global count
if (threadIdx.x == 0) {
atomicAdd(global_count, block_count);
}
}
} // namespace
gdf_error gdf_count_nonzero_mask(cudf::valid_type const* masks,
cudf::size_type num_rows, cudf::size_type* count) {
// TODO: add a default parameter cudaStream_t stream = 0 when we move API to
// C++
if ((nullptr == count)) {
return GDF_DATASET_EMPTY;
}
if (0 == num_rows) {
*count = 0;
return GDF_SUCCESS;
}
if (nullptr == masks) {
*count = num_rows;
return GDF_SUCCESS;
}
// Masks will be processed as 4B types, therefore we require that the
// underlying type be less than or equal to 4B
static_assert(sizeof(valid32_t) >= sizeof(cudf::valid_type),
"cudf::valid_type is assumed to be <= 4B type");
// Number of cudf::valid_types in the validity bitmask
cudf::size_type const num_masks{gdf_num_bitmask_elements(num_rows)};
// Number of 4 byte types in the validity bit mask
cudf::size_type num_masks32{static_cast<cudf::size_type>(
std::ceil(static_cast<float>(num_masks) / RATIO))};
cudf::size_type h_count{0};
if (num_masks32 > 0) {
// TODO: Probably shouldn't create/destroy the stream every time
cudaStream_t count_stream;
CUDA_TRY(cudaStreamCreate(&count_stream));
int* d_count{nullptr};
// Cast validity buffer to 4 byte type
valid32_t const* masks32{reinterpret_cast<valid32_t const*>(masks)};
RMM_TRY(RMM_ALLOC((void**)&d_count, sizeof(cudf::size_type), count_stream));
CUDA_TRY(cudaMemsetAsync(d_count, 0, sizeof(cudf::size_type), count_stream));
cudf::size_type const grid_size{(num_masks32 + block_size - 1) / block_size};
count_valid_bits<<<grid_size, block_size, 0, count_stream>>>(
masks32, num_masks32, num_rows, d_count);
CUDA_TRY(cudaGetLastError());
CUDA_TRY(cudaMemcpyAsync(&h_count, d_count, sizeof(cudf::size_type),
cudaMemcpyDeviceToHost, count_stream));
RMM_TRY(RMM_FREE(d_count, count_stream));
CUDA_TRY(cudaStreamSynchronize(count_stream));
CUDA_TRY(cudaStreamDestroy(count_stream));
}
assert(h_count >= 0);
assert(h_count <= num_rows);
*count = h_count;
return GDF_SUCCESS;
}
gdf_error gdf_mask_concat(cudf::valid_type* output_mask,
cudf::size_type output_column_length,
gdf_column *columns_to_concat[],
cudf::size_type num_columns) {
std::vector<cudf::valid_type*> h_masks(num_columns);
std::vector<cudf::size_type> h_column_lengths(num_columns);
std::transform(columns_to_concat, columns_to_concat + num_columns,
h_masks.begin(), [](auto col) { return col->valid; });
std::transform(columns_to_concat, columns_to_concat + num_columns,
h_column_lengths.begin(), [](auto col) { return col->size; });
rmm::device_vector<cudf::valid_type*> d_masks(h_masks);
rmm::device_vector<cudf::size_type> d_column_lengths(h_column_lengths);
cudf::valid_type** masks_to_concat = thrust::raw_pointer_cast(d_masks.data());
cudf::size_type* column_lengths = thrust::raw_pointer_cast(d_column_lengths.data());
// This lambda is executed in a thrust algorithm. Each thread computes and
// returns one cudf::valid_type element for the concatenated output mask
auto mask_concatenator = [=] __device__(cudf::size_type mask_index) {
cudf::valid_type output_m = 0;
int cur_mask_index = 0, cur_mask_start = 0;
int cur_mask_len = column_lengths[0];
// Each thread processes one GDF_VALID_BITSIZE worth of valid bits
for (int bit = 0; bit < GDF_VALID_BITSIZE; ++bit) {
cudf::size_type output_index = mask_index * GDF_VALID_BITSIZE + bit;
// stop when we are beyond the length of the output column (in elements)
if (output_index >= output_column_length) break;
// find the next column's mask when we step past the current column's
// length
while ((cur_mask_start + cur_mask_len <= output_index) &&
(cur_mask_index < num_columns - 1)) {
cur_mask_start += cur_mask_len;
cur_mask_len = column_lengths[++cur_mask_index];
}
// Set each valid bit at the right location in this thread's output
// cudf::valid_type Note: gdf_is_valid returns true when the input mask is a
// null pointer This makes it behave as if columns with null validity
// masks have masks of all 1s, which is the desired behavior.
cudf::size_type index = output_index - cur_mask_start;
if (gdf_is_valid(masks_to_concat[cur_mask_index], index)) {
output_m |= (1 << bit);
}
}
return output_m;
};
// This is like thrust::for_each where the lambda gets the current index into
// the output array as input
thrust::tabulate(rmm::exec_policy()->on(0), output_mask,
output_mask + gdf_num_bitmask_elements(output_column_length),
mask_concatenator);
CUDA_TRY(cudaGetLastError());
return GDF_SUCCESS;
}
gdf_error all_bitmask_on(cudf::valid_type* valid_out,
cudf::size_type& out_null_count,
cudf::size_type num_values, cudaStream_t stream) {
cudf::size_type num_bitmask_elements = gdf_num_bitmask_elements(num_values);
cudf::valid_type max_char = 255;
thrust::fill(rmm::exec_policy(stream)->on(stream), valid_out,
valid_out + num_bitmask_elements, max_char);
// we have no nulls so set all the bits in cudf::valid_type to 1
out_null_count = 0;
return GDF_SUCCESS;
}
gdf_error apply_bitmask_to_bitmask(cudf::size_type& out_null_count,
cudf::valid_type* valid_out,
const cudf::valid_type* valid_left,
const cudf::valid_type* valid_right,
cudaStream_t stream,
cudf::size_type num_values) {
cudf::size_type num_bitmask_elements = gdf_num_bitmask_elements(num_values);
thrust::transform(rmm::exec_policy(stream)->on(stream), valid_left,
valid_left + num_bitmask_elements, valid_right, valid_out,
thrust::bit_and<cudf::valid_type>());
cudf::size_type non_nulls;
auto error = gdf_count_nonzero_mask(valid_out, num_values, &non_nulls);
out_null_count = num_values - non_nulls;
return error;
}
namespace cudf {
namespace {
/**
* @brief Computes a bitmask from the bitwise AND of a set of bitmasks.
*/
struct bitwise_and {
bitwise_and(bit_mask::bit_mask_t** _masks, cudf::size_type _num_masks)
: masks{_masks}, num_masks(_num_masks) {}
__device__ inline bit_mask::bit_mask_t operator()(
cudf::size_type mask_element_index) {
using namespace bit_mask;
bit_mask_t result_mask{~bit_mask_t{0}}; // all 1s
for (cudf::size_type i = 0; i < num_masks; ++i) {
result_mask &= masks[i][mask_element_index];
}
return result_mask;
}
cudf::size_type num_masks;
bit_mask::bit_mask_t** masks;
};
} // namespace
rmm::device_vector<bit_mask::bit_mask_t> row_bitmask(cudf::table const& table,
cudaStream_t stream) {
using namespace bit_mask;
rmm::device_vector<bit_mask_t> row_bitmask(num_elements(table.num_rows()),
~bit_mask_t{0});
// Populate vector of pointers to the bitmasks of columns that contain
// NULL values
std::vector<bit_mask_t*> column_bitmasks{row_bitmask.data().get()};
std::for_each(
table.begin(), table.end(), [&column_bitmasks](gdf_column const* col) {
if ((nullptr != col->valid) and (col->null_count > 0)) {
column_bitmasks.push_back(reinterpret_cast<bit_mask_t*>(col->valid));
}
});
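// row_bitmask itself (initialised to all 1s) is the first entry, so the
// element-wise AND below leaves a row valid unless some nullable column
// marks it null.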
rmm::device_vector<bit_mask_t*> d_column_bitmasks{column_bitmasks};
// Compute bitwise AND of all key columns' bitmasks
thrust::tabulate(
rmm::exec_policy(stream)->on(stream), row_bitmask.begin(),
row_bitmask.end(),
bitwise_and(d_column_bitmasks.data().get(), d_column_bitmasks.size()));
return row_bitmask;
}
} // namespace cudf
|
93b1e5c01660ce908bf0f5f3348088d01f5355af.hip | // !!! This is a file automatically generated by hipify!!!
#include <benchmark/benchmark.h>
#define WMMA
#include <cblas.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include "gemm/utils.hpp"
#if TORCH_HIP_VERSION < 9000
// CUDA 9.0 introduces a new, light-weight barrier synchronization primitive
// that operates at the warp-scope. This is required to ensure visibility of
// reads/writes among threads that can make independent progress on Volta.
// For previous CUDA versions these synchronizations are not necessary, and we
// define an empty function as a convenience for backward compatibility.
#ifndef __syncwarp
#define __syncwarp(...)
#endif // __syncwarp
#endif // TORCH_HIP_VERSION < 9000
#if 0
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning( \
disable : 4100 4101 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800)
#elif defined __INTEL_COMPILER
#pragma warning push
#pragma warning disable 2196 279 1684 2259
#elif defined __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wall"
#pragma clang diagnostic ignored "-Wextra"
#pragma clang diagnostic ignored "-Wunused"
#pragma clang diagnostic ignored "-Wunused-parameter"
#pragma clang diagnostic ignored "-Wunused-variable"
#elif defined __GNUC__ && __GNUC__ >= 5
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wunused"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
#endif
// Cutlass GEMM API
#include <cutlass/gemm/dispatch.h>
#include <cutlass/gemm/epilogue_function.h>
#include <cutlass/util/util.h>
#ifdef PRINT_IF_ERROR
#undef PRINT_IF_ERROR
#endif // PRINT_IF_ERROR
#include "init/init.hpp"
#include "utils/utils.hpp"
#include "gemm/args.hpp"
#include "gemm/utils.hpp"
template <typename ValueT, typename AccumT,
cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static hipError_t cutlass_gemm(int M, int N, int K, AccumT* alpha, ValueT* A, ValueT* B,
AccumT* beta, AccumT* C) {
using namespace cutlass;
using namespace cutlass::gemm;
using value_t = ValueT;
using accum_t = AccumT;
constexpr auto accumulator_alignment = sizeof(accum_t);
constexpr auto operator_alignment = accumulator_alignment;
constexpr auto math_op =
(std::is_same<value_t, half>::value && std::is_same<accum_t, float>::value)
? math_operation_class_t::matrix
: math_operation_class_t::scalar;
constexpr auto TransformA = matrix_transform_t::Transpose;
constexpr auto TransformB = matrix_transform_t::Transpose;
// Define the epilogue functor
using epilogue_op_t = blas_scaled_epilogue<accum_t, accum_t, accum_t>;
const epilogue_op_t epilogue_op(*alpha, *beta);
const auto conf = cutlass::gemm::device_gemm<
tiling_strategy, //< Tile-sizing classification
math_op, //< Indicates which class of math operation to select
TransformA, //< Transformation op for matrix A
operator_alignment, //< Alignment (in bytes) of A operand
TransformB, //< Transformation op for matrix B
operator_alignment, //< Alignment (in bytes) of B operand
value_t, //< Multiplicand value type (matrices A and B)
accum_t, //< Accumulator value type (matrix C and scalars)
epilogue_op_t, //< Epilogue operation to update matrix C
accumulator_alignment //< Alignment (in bytes) of C operand
>(M, N, K, epilogue_op, B, A, C);
return conf.result;
}
template <typename ValueT, typename AccumT,
cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS(benchmark::State& state) {
static const std::string IMPLEMENTATION_NAME =
gemm::detail::implementation_name<ValueT, AccumT>();
state.SetLabel(fmt::format("CUTLASS/{}", IMPLEMENTATION_NAME));
if (!has_cuda) {
state.SkipWithError("CUDA/SGEMM no CUDA device found");
return;
}
const AccumT accumOne = gemm::detail::one<AccumT>();
const AccumT accumZero = gemm::detail::zero<AccumT>();
const ValueT valueOne = gemm::detail::one<ValueT>();
const ValueT valueZero = gemm::detail::zero<ValueT>();
const auto M = state.range(0);
const auto N = state.range(1);
const auto K = state.range(2);
AccumT alpha{accumOne};
AccumT beta{accumOne};
auto a = std::vector<ValueT>(M * K);
auto b = std::vector<ValueT>(K * N);
auto c = std::vector<AccumT>(M * N);
std::fill(a.begin(), a.end(), valueOne);
std::fill(b.begin(), b.end(), valueOne);
std::fill(c.begin(), c.end(), accumZero);
using accum_device_type = typename gemm::detail::cuda_type<AccumT>::type;
using value_device_type = typename gemm::detail::cuda_type<ValueT>::type;
value_device_type *d_a{nullptr}, *d_b{nullptr};
accum_device_type* d_c{nullptr};
if (PRINT_IF_ERROR(hipMalloc((void**) &d_a, a.size() * sizeof(*a.data())))) {
LOG(critical, "CUTLASS/{} device memory allocation failed for matrix A",
IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} device memory allocation failed for matrix A",
IMPLEMENTATION_NAME)
.c_str());
return;
}
defer(hipFree(d_a));
if (PRINT_IF_ERROR(hipMalloc((void**) &d_b, b.size() * sizeof(*b.data())))) {
LOG(critical, "CUTLASS/{} device memory allocation failed for matrix B",
IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} device memory allocation failed for matrix B",
IMPLEMENTATION_NAME)
.c_str());
return;
}
defer(hipFree(d_b));
if (PRINT_IF_ERROR(hipMalloc((void**) &d_c, c.size() * sizeof(*c.data())))) {
LOG(critical, "CUTLASS/{} device memory allocation failed for matrix C",
IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} device memory allocation failed for matrix C",
IMPLEMENTATION_NAME)
.c_str());
return;
}
defer(hipFree(d_c));
if (PRINT_IF_ERROR(hipblasSetMatrix(M, K, sizeof(*a.data()), a.data(), M, d_a, M))) {
LOG(critical, "CUTLASS/{} setting of A matrix failed", IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} setting of A matrix failed", IMPLEMENTATION_NAME)
.c_str());
return;
}
if (PRINT_IF_ERROR(hipblasSetMatrix(K, N, sizeof(*b.data()), b.data(), K, d_b, K))) {
LOG(critical, "CUTLASS/{} setting of B matrix failed", IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} setting of B matrix failed", IMPLEMENTATION_NAME)
.c_str());
return;
}
if (PRINT_IF_ERROR(hipblasSetMatrix(M, N, sizeof(*c.data()), c.data(), M, d_c, M))) {
LOG(critical, "CUTLASS/{} setting of C matrix failed", IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} setting of C matrix failed", IMPLEMENTATION_NAME)
.c_str());
return;
}
hipEvent_t start, stop;
PRINT_IF_ERROR(hipEventCreate(&start));
PRINT_IF_ERROR(hipEventCreate(&stop));
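// Each iteration is timed with GPU events and reported via
// state.SetIterationTime(), which is why the benchmark registrations below
// use UseManualTime().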
for (auto _ : state) {
hipEventRecord(start, NULL);
const auto cutlass_err =
cutlass_gemm<value_device_type, accum_device_type, tiling_strategy>(
M, N, K, reinterpret_cast<accum_device_type*>(&alpha), d_a, d_b,
reinterpret_cast<accum_device_type*>(&beta), d_c);
hipEventRecord(stop, NULL);
const auto cuda_err = hipEventSynchronize(stop);
state.PauseTiming();
if (PRINT_IF_ERROR(cutlass_err)) {
state.SkipWithError(
fmt::format("CUTLASS/{} failed to launch kernel", IMPLEMENTATION_NAME).c_str());
break;
}
if (PRINT_IF_ERROR(cuda_err)) {
state.SkipWithError(
fmt::format("CUTLASS/{} failed to synchronize kernel", IMPLEMENTATION_NAME)
.c_str());
break;
}
float msecTotal = 0.0f;
if (PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop))) {
state.SkipWithError(
fmt::format("CUTLASS/{} failed to get elapsed time", IMPLEMENTATION_NAME)
.c_str());
break;
}
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert(
{{"M", M},
{"N", N},
{"K", K},
{"num_elements", M * N * K},
{"flops",
{state.iterations() * 2.0 * M * N * K, benchmark::Counter::kAvgThreadsRate}}});
state.SetBytesProcessed(int64_t(state.iterations()) * a.size() * b.size() * c.size());
state.SetItemsProcessed(int64_t(state.iterations()) * M * N * K);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_HGEMM(benchmark::State& state) {
return CUTLASS<__half, __half, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_WGEMM(benchmark::State& state) {
return CUTLASS<half, float, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_SGEMM(benchmark::State& state) {
return CUTLASS<float, float, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_DGEMM(benchmark::State& state) {
return CUTLASS<double, double, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_I8GEMM(benchmark::State& state) {
return CUTLASS<int8_t, int8_t, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_I32GEMM(benchmark::State& state) {
return CUTLASS<int32_t, int32_t, tiling_strategy>(state);
}
// up to 512
#define BENCHMARK_SMALL_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Small) \
->Args({16, 16, 16}) \
->Args({32, 32, 32}) \
->Args({48, 48, 48}) \
->Args({64, 64, 64}) \
->Args({96, 96, 96}) \
->Args({128, 128, 128}) \
->Args({192, 192, 192}) \
->Args({256, 256, 256}) \
->Args({512, 512, 512})
// up to 2048
#define BENCHMARK_MEDIUM_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Medium) \
->Args({768, 768, 768}) \
->Args({1024, 1024, 1024}) \
->Args({1536, 1536, 1536}) \
->Args({2048, 2048, 2048})
// up to 3584
#define BENCHMARK_LARGE_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Large) \
->Args({2560, 2560, 2560}) \
->Args({3072, 3072, 3072}) \
->Args({3584, 3584, 3584})
#define BENCHMARK_HUGE_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Huge) \
->Args({4096, 4096, 4096}) \
->Args({5120, 5120, 5120}) \
->Args({6144, 6144, 6144}) \
->Args({7168, 7168, 7168}) \
->Args({8192, 8192, 8192}) \
->Args({9216, 9216, 9216}) \
->Args({9728, 9728, 9728}) \
->Args({10240, 10240, 10240}) \
->Args({10752, 10752, 10752}) \
->Args({11264, 11264, 11264}) \
->Args({11776, 11776, 11776}) \
->Args({12288, 12288, 12288}) \
->Args({12800, 12800, 12800}) \
->Args({13312, 13312, 13312}) \
->Args({13824, 13824, 13824}) \
->Args({14336, 14336, 14336}) \
->Args({14848, 14848, 14848}) \
->Args({15360, 15360, 15360}) \
->Args({15872, 15872, 15872}) \
->Args({16384, 16384, 16384}) \
->Args({16896, 16896, 16896}) \
->Args({17408, 17408, 17408}) \
->Args({17920, 17920, 17920}) \
->Args({18432, 18432, 18432}) \
->Args({18944, 18944, 18944}) \
->Args({19456, 19456, 19456}) \
->Args({19968, 19968, 19968}) \
->Args({20480, 20480, 20480}) \
->Args({20992, 20992, 20992}) \
->Args({21504, 21504, 21504}) \
->Args({22016, 22016, 22016}) \
->Args({22528, 22528, 22528}) \
->Args({23040, 23040, 23040}) \
->Args({23552, 23552, 23552}) \
->Args({24064, 24064, 24064}) \
->Args({24576, 24576, 24576}) \
->Args({25088, 25088, 25088}) \
->Args({25600, 25600, 25600}) \
->Args({26112, 26112, 26112}) \
->Args({26624, 26624, 26624}) \
->Args({27136, 27136, 27136}) \
->Args({27648, 27648, 27648}) \
->Args({28160, 28160, 28160})
#define BENCHMARK_WIDE_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Wide) \
->Args({128, 169, 1728}) \
->Args({128, 729, 1200}) \
->Args({192, 169, 1728})
#define BENCHMARK_TALL_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Tall) \
->Args({512, 1, 500000}) \
->Args({1024, 1, 500000}) \
->Args({512, 2, 500000}) \
->Args({1024, 2, 500000}) \
->Args({512, 4, 500000}) \
->Args({1024, 4, 500000})
#define BENCHMARK_CUTLASS(b) \
BENCHMARK_SMALL_TILING(b)->UseManualTime(); \
BENCHMARK_MEDIUM_TILING(b)->UseManualTime(); \
BENCHMARK_LARGE_TILING(b)->UseManualTime(); \
BENCHMARK_HUGE_TILING(b)->UseManualTime();
#if 0
BENCHMARK_LARGE_TILING(b)->UseManualTime(); \
BENCHMARK_HUGE_TILING(b)->UseManualTime(); \
BENCHMARK_WIDE_TILING(b)->UseManualTime(); \
BENCHMARK_TALL_TILING(b)->UseManualTime()
#endif
BENCHMARK_CUTLASS(CUTLASS_HGEMM);
BENCHMARK_CUTLASS(CUTLASS_WGEMM);
// BENCHMARK_CUTLASS(CUTLASS_SGEMM);
// BENCHMARK_CUTLASS(CUTLASS_DGEMM);
// BENCHMARK_CUTLASS(CUTLASS_I32GEMM);
// BENCHMARK_CUTLASS(CUTLASS_I8GEMM);
#if 0
#ifdef _MSC_VER
#pragma warning(pop)
#elif defined __INTEL_COMPILER
#pragma warning pop
#elif defined __clang__
#pragma clang diagnostic pop
#elif defined __GNUC__ && __GNUC__ >= 5
#pragma GCC diagnostic pop
#endif
#endif
| 93b1e5c01660ce908bf0f5f3348088d01f5355af.cu | #include <benchmark/benchmark.h>
#define WMMA
#include <cblas.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include "gemm/utils.hpp"
#if CUDA_VERSION < 9000
// CUDA 9.0 introduces a new, light-weight barrier synchronization primitive
// that operates at the warp-scope. This is required to ensure visibility of
// reads/writes among threads that can make independent progress on Volta.
// For previous CUDA versions these synchronizations are not necessary, and we
// define an empty function as a convenience for backward compatibility.
#ifndef __syncwarp
#define __syncwarp(...)
#endif // __syncwarp
#endif // CUDA_VERSION < 9000
#if 0
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning( \
disable : 4100 4101 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800)
#elif defined __INTEL_COMPILER
#pragma warning push
#pragma warning disable 2196 279 1684 2259
#elif defined __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wall"
#pragma clang diagnostic ignored "-Wextra"
#pragma clang diagnostic ignored "-Wunused"
#pragma clang diagnostic ignored "-Wunused-parameter"
#pragma clang diagnostic ignored "-Wunused-variable"
#elif defined __GNUC__ && __GNUC__ >= 5
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wunused"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
#endif
// Cutlass GEMM API
#include <cutlass/gemm/dispatch.h>
#include <cutlass/gemm/epilogue_function.h>
#include <cutlass/util/util.h>
#ifdef PRINT_IF_ERROR
#undef PRINT_IF_ERROR
#endif // PRINT_IF_ERROR
#include "init/init.hpp"
#include "utils/utils.hpp"
#include "gemm/args.hpp"
#include "gemm/utils.hpp"
template <typename ValueT, typename AccumT,
cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static cudaError_t cutlass_gemm(int M, int N, int K, AccumT* alpha, ValueT* A, ValueT* B,
AccumT* beta, AccumT* C) {
using namespace cutlass;
using namespace cutlass::gemm;
using value_t = ValueT;
using accum_t = AccumT;
constexpr auto accumulator_alignment = sizeof(accum_t);
constexpr auto operator_alignment = accumulator_alignment;
constexpr auto math_op =
(std::is_same<value_t, half>::value && std::is_same<accum_t, float>::value)
? math_operation_class_t::matrix
: math_operation_class_t::scalar;
constexpr auto TransformA = matrix_transform_t::Transpose;
constexpr auto TransformB = matrix_transform_t::Transpose;
// Define the epilogue functor
using epilogue_op_t = blas_scaled_epilogue<accum_t, accum_t, accum_t>;
const epilogue_op_t epilogue_op(*alpha, *beta);
const auto conf = cutlass::gemm::device_gemm<
tiling_strategy, //< Tile-sizing classification
math_op, //< Indicates which class of math operation to select
TransformA, //< Transformation op for matrix A
operator_alignment, //< Alignment (in bytes) of A operand
TransformB, //< Transformation op for matrix B
operator_alignment, //< Alignment (in bytes) of B operand
value_t, //< Multiplicand value type (matrices A and B)
accum_t, //< Accumulator value type (matrix C and scalars)
epilogue_op_t, //< Epilogue operation to update matrix C
accumulator_alignment //< Alignment (in bytes) of C operand
>(M, N, K, epilogue_op, B, A, C);
return conf.result;
}
template <typename ValueT, typename AccumT,
cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS(benchmark::State& state) {
static const std::string IMPLEMENTATION_NAME =
gemm::detail::implementation_name<ValueT, AccumT>();
state.SetLabel(fmt::format("CUTLASS/{}", IMPLEMENTATION_NAME));
if (!has_cuda) {
state.SkipWithError("CUDA/SGEMM no CUDA device found");
return;
}
const AccumT accumOne = gemm::detail::one<AccumT>();
const AccumT accumZero = gemm::detail::zero<AccumT>();
const ValueT valueOne = gemm::detail::one<ValueT>();
const ValueT valueZero = gemm::detail::zero<ValueT>();
const auto M = state.range(0);
const auto N = state.range(1);
const auto K = state.range(2);
AccumT alpha{accumOne};
AccumT beta{accumOne};
auto a = std::vector<ValueT>(M * K);
auto b = std::vector<ValueT>(K * N);
auto c = std::vector<AccumT>(M * N);
std::fill(a.begin(), a.end(), valueOne);
std::fill(b.begin(), b.end(), valueOne);
std::fill(c.begin(), c.end(), accumZero);
using accum_device_type = typename gemm::detail::cuda_type<AccumT>::type;
using value_device_type = typename gemm::detail::cuda_type<ValueT>::type;
value_device_type *d_a{nullptr}, *d_b{nullptr};
accum_device_type* d_c{nullptr};
if (PRINT_IF_ERROR(cudaMalloc((void**) &d_a, a.size() * sizeof(*a.data())))) {
LOG(critical, "CUTLASS/{} device memory allocation failed for matrix A",
IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} device memory allocation failed for matrix A",
IMPLEMENTATION_NAME)
.c_str());
return;
}
defer(cudaFree(d_a));
if (PRINT_IF_ERROR(cudaMalloc((void**) &d_b, b.size() * sizeof(*b.data())))) {
LOG(critical, "CUTLASS/{} device memory allocation failed for matrix B",
IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} device memory allocation failed for matrix B",
IMPLEMENTATION_NAME)
.c_str());
return;
}
defer(cudaFree(d_b));
if (PRINT_IF_ERROR(cudaMalloc((void**) &d_c, c.size() * sizeof(*c.data())))) {
LOG(critical, "CUTLASS/{} device memory allocation failed for matrix C",
IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} device memory allocation failed for matrix C",
IMPLEMENTATION_NAME)
.c_str());
return;
}
defer(cudaFree(d_c));
if (PRINT_IF_ERROR(cublasSetMatrix(M, K, sizeof(*a.data()), a.data(), M, d_a, M))) {
LOG(critical, "CUTLASS/{} setting of A matrix failed", IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} setting of A matrix failed", IMPLEMENTATION_NAME)
.c_str());
return;
}
if (PRINT_IF_ERROR(cublasSetMatrix(K, N, sizeof(*b.data()), b.data(), K, d_b, K))) {
LOG(critical, "CUTLASS/{} setting of B matrix failed", IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} setting of B matrix failed", IMPLEMENTATION_NAME)
.c_str());
return;
}
if (PRINT_IF_ERROR(cublasSetMatrix(M, N, sizeof(*c.data()), c.data(), M, d_c, M))) {
LOG(critical, "CUTLASS/{} setting of C matrix failed", IMPLEMENTATION_NAME);
state.SkipWithError(
fmt::format("CUTLASS/{} setting of C matrix failed", IMPLEMENTATION_NAME)
.c_str());
return;
}
cudaEvent_t start, stop;
PRINT_IF_ERROR(cudaEventCreate(&start));
PRINT_IF_ERROR(cudaEventCreate(&stop));
for (auto _ : state) {
cudaEventRecord(start, NULL);
const auto cutlass_err =
cutlass_gemm<value_device_type, accum_device_type, tiling_strategy>(
M, N, K, reinterpret_cast<accum_device_type*>(&alpha), d_a, d_b,
reinterpret_cast<accum_device_type*>(&beta), d_c);
cudaEventRecord(stop, NULL);
const auto cuda_err = cudaEventSynchronize(stop);
state.PauseTiming();
if (PRINT_IF_ERROR(cutlass_err)) {
state.SkipWithError(
fmt::format("CUTLASS/{} failed to launch kernel", IMPLEMENTATION_NAME).c_str());
break;
}
if (PRINT_IF_ERROR(cuda_err)) {
state.SkipWithError(
fmt::format("CUTLASS/{} failed to synchronize kernel", IMPLEMENTATION_NAME)
.c_str());
break;
}
float msecTotal = 0.0f;
if (PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop))) {
state.SkipWithError(
fmt::format("CUTLASS/{} failed to get elapsed time", IMPLEMENTATION_NAME)
.c_str());
break;
}
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert(
{{"M", M},
{"N", N},
{"K", K},
{"num_elements", M * N * K},
{"flops",
{state.iterations() * 2.0 * M * N * K, benchmark::Counter::kAvgThreadsRate}}});
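// The flops counter assumes 2 * M * N * K floating-point operations per
// GEMM (one multiply and one add per inner-product term).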
state.SetBytesProcessed(int64_t(state.iterations()) * a.size() * b.size() * c.size());
state.SetItemsProcessed(int64_t(state.iterations()) * M * N * K);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_HGEMM(benchmark::State& state) {
return CUTLASS<__half, __half, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_WGEMM(benchmark::State& state) {
return CUTLASS<half, float, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_SGEMM(benchmark::State& state) {
return CUTLASS<float, float, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_DGEMM(benchmark::State& state) {
return CUTLASS<double, double, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_I8GEMM(benchmark::State& state) {
return CUTLASS<int8_t, int8_t, tiling_strategy>(state);
}
template <cutlass::gemm::tiling_strategy::kind_t tiling_strategy>
static void CUTLASS_I32GEMM(benchmark::State& state) {
return CUTLASS<int32_t, int32_t, tiling_strategy>(state);
}
// up to 512
#define BENCHMARK_SMALL_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Small) \
->Args({16, 16, 16}) \
->Args({32, 32, 32}) \
->Args({48, 48, 48}) \
->Args({64, 64, 64}) \
->Args({96, 96, 96}) \
->Args({128, 128, 128}) \
->Args({192, 192, 192}) \
->Args({256, 256, 256}) \
->Args({512, 512, 512})
// up to 2048
#define BENCHMARK_MEDIUM_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Medium) \
->Args({768, 768, 768}) \
->Args({1024, 1024, 1024}) \
->Args({1536, 1536, 1536}) \
->Args({2048, 2048, 2048})
// up to 3584
#define BENCHMARK_LARGE_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Large) \
->Args({2560, 2560, 2560}) \
->Args({3072, 3072, 3072}) \
->Args({3584, 3584, 3584})
#define BENCHMARK_HUGE_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Huge) \
->Args({4096, 4096, 4096}) \
->Args({5120, 5120, 5120}) \
->Args({6144, 6144, 6144}) \
->Args({7168, 7168, 7168}) \
->Args({8192, 8192, 8192}) \
->Args({9216, 9216, 9216}) \
->Args({9728, 9728, 9728}) \
->Args({10240, 10240, 10240}) \
->Args({10752, 10752, 10752}) \
->Args({11264, 11264, 11264}) \
->Args({11776, 11776, 11776}) \
->Args({12288, 12288, 12288}) \
->Args({12800, 12800, 12800}) \
->Args({13312, 13312, 13312}) \
->Args({13824, 13824, 13824}) \
->Args({14336, 14336, 14336}) \
->Args({14848, 14848, 14848}) \
->Args({15360, 15360, 15360}) \
->Args({15872, 15872, 15872}) \
->Args({16384, 16384, 16384}) \
->Args({16896, 16896, 16896}) \
->Args({17408, 17408, 17408}) \
->Args({17920, 17920, 17920}) \
->Args({18432, 18432, 18432}) \
->Args({18944, 18944, 18944}) \
->Args({19456, 19456, 19456}) \
->Args({19968, 19968, 19968}) \
->Args({20480, 20480, 20480}) \
->Args({20992, 20992, 20992}) \
->Args({21504, 21504, 21504}) \
->Args({22016, 22016, 22016}) \
->Args({22528, 22528, 22528}) \
->Args({23040, 23040, 23040}) \
->Args({23552, 23552, 23552}) \
->Args({24064, 24064, 24064}) \
->Args({24576, 24576, 24576}) \
->Args({25088, 25088, 25088}) \
->Args({25600, 25600, 25600}) \
->Args({26112, 26112, 26112}) \
->Args({26624, 26624, 26624}) \
->Args({27136, 27136, 27136}) \
->Args({27648, 27648, 27648}) \
->Args({28160, 28160, 28160})
#define BENCHMARK_WIDE_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Wide) \
->Args({128, 169, 1728}) \
->Args({128, 729, 1200}) \
->Args({192, 169, 1728})
#define BENCHMARK_TALL_TILING(b) \
BENCHMARK_TEMPLATE(b, cutlass::gemm::tiling_strategy::Tall) \
->Args({512, 1, 500000}) \
->Args({1024, 1, 500000}) \
->Args({512, 2, 500000}) \
->Args({1024, 2, 500000}) \
->Args({512, 4, 500000}) \
->Args({1024, 4, 500000})
#define BENCHMARK_CUTLASS(b) \
BENCHMARK_SMALL_TILING(b)->UseManualTime(); \
BENCHMARK_MEDIUM_TILING(b)->UseManualTime(); \
BENCHMARK_LARGE_TILING(b)->UseManualTime(); \
BENCHMARK_HUGE_TILING(b)->UseManualTime();
#if 0
BENCHMARK_LARGE_TILING(b)->UseManualTime(); \
BENCHMARK_HUGE_TILING(b)->UseManualTime(); \
BENCHMARK_WIDE_TILING(b)->UseManualTime(); \
BENCHMARK_TALL_TILING(b)->UseManualTime()
#endif
BENCHMARK_CUTLASS(CUTLASS_HGEMM);
BENCHMARK_CUTLASS(CUTLASS_WGEMM);
// BENCHMARK_CUTLASS(CUTLASS_SGEMM);
// BENCHMARK_CUTLASS(CUTLASS_DGEMM);
// BENCHMARK_CUTLASS(CUTLASS_I32GEMM);
// BENCHMARK_CUTLASS(CUTLASS_I8GEMM);
#if 0
#ifdef _MSC_VER
#pragma warning(pop)
#elif defined __INTEL_COMPILER
#pragma warning pop
#elif defined __clang__
#pragma clang diagnostic pop
#elif defined __GNUC__ && __GNUC__ >= 5
#pragma GCC diagnostic pop
#endif
#endif
|
3b8ef1c053aaf0d2ce5ec46d65aff1daadb580f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void getMaxSpeed_gpu( const float* values, float* currentMaxSpeed) {
if (sqrt(values[1]*values[1]+values[2]*values[2]) > sqrt(currentMaxSpeed[1]*currentMaxSpeed[1]+currentMaxSpeed[2]*currentMaxSpeed[2])) {
currentMaxSpeed[0] = values[0];
currentMaxSpeed[1] = values[1];
currentMaxSpeed[2] = values[2];
currentMaxSpeed[3] = values[3];
}
}
// CUDA kernel function
__global__ void op_cuda_getMaxSpeed(
const float *__restrict arg0,
float *arg1,
int set_size ) {
//process set elements
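  // grid-stride loop: each thread handles elements n, n+blockDim*gridDim, ... so any
  // launch configuration covers the whole set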
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
getMaxSpeed_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_getMaxSpeed(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(24);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[24].name = name;
OP_kernels[24].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: getMaxSpeed");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_24
int nthread = OP_BLOCK_SIZE_24;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
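    // hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...)
    // below is the HIP replacement for the CUDA triple-chevron launch
    // op_cuda_getMaxSpeed<<<nblocks,nthread>>>(...)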
hipLaunchKernelGGL(( op_cuda_getMaxSpeed), dim3(nblocks),dim3(nthread), 0, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[24].time += wall_t2 - wall_t1;
OP_kernels[24].transfer += (float)set->size * arg0.size;
OP_kernels[24].transfer += (float)set->size * arg1.size * 2.0f;
}
| 3b8ef1c053aaf0d2ce5ec46d65aff1daadb580f2.cu | //
// auto-generated by op2.py
//
//user function
__device__ void getMaxSpeed_gpu( const float* values, float* currentMaxSpeed) {
if (sqrt(values[1]*values[1]+values[2]*values[2]) > sqrt(currentMaxSpeed[1]*currentMaxSpeed[1]+currentMaxSpeed[2]*currentMaxSpeed[2])) {
currentMaxSpeed[0] = values[0];
currentMaxSpeed[1] = values[1];
currentMaxSpeed[2] = values[2];
currentMaxSpeed[3] = values[3];
}
}
// CUDA kernel function
__global__ void op_cuda_getMaxSpeed(
const float *__restrict arg0,
float *arg1,
int set_size ) {
//process set elements
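  // grid-stride loop: each thread handles elements n, n+blockDim*gridDim, ... so any
  // launch configuration covers the whole set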
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
getMaxSpeed_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_getMaxSpeed(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(24);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[24].name = name;
OP_kernels[24].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: getMaxSpeed");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_24
int nthread = OP_BLOCK_SIZE_24;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
op_cuda_getMaxSpeed<<<nblocks,nthread>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[24].time += wall_t2 - wall_t1;
OP_kernels[24].transfer += (float)set->size * arg0.size;
OP_kernels[24].transfer += (float)set->size * arg1.size * 2.0f;
}
|
1243138200e6ba04508f1c452d9798dcf7aef30d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
extern "C" {
#include "bmp.h"
}
typedef struct Color {
unsigned int r, g, b;
} Color;
#define THREADS 1024
void CheckCudaError(char sms[], int line) {
hipError_t error;
error = hipGetLastError();
if (error) {
printf("(ERROR) %s - %s in %s at line %d\n", sms, hipGetErrorString(error), __FILE__, line);
exit(EXIT_FAILURE);
}
}
int square(int value) {
return value * value;
}
void display_means(Color means[], int counts[], int N_colors) {
int i;
for (i = 0; i < N_colors; ++i) {
fprintf(stderr, "mean %d: ", i);
fprintf(stderr, "r: %d, ", means[i].r);
fprintf(stderr, "g: %d, ", means[i].g);
fprintf(stderr, "b: %d, ", means[i].b);
fprintf(stderr, "count: %d\n", counts[i]);
}
fprintf(stderr, "\n");
}
void display_assigns(int assigns[], int Size) {
int i;
for (i = 0; i < Size; ++i) {
fprintf(stderr, "%d: %d\n", i, assigns[i]);
}
}
void init_means(Color means[], unsigned char *im, int Size_row, int N_colors, int Size) {
int r;
int i;
for (i = 0; i < N_colors; ++i) {
r = rand() % Size;
int index = (r*3/Size_row) * Size_row + ((r*3)%Size_row);
means[i].r = im[index+2];
means[i].g = im[index+1];
means[i].b = im[index];
}
}
void find_best_mean_seq(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
int i;
for (i = 0; i < N; ++i) {
int j;
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[i] = assign;
}
}
__global__ void find_best_mean_par(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
int j;
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[id] = assign;
}
}
void divide_sums_by_counts_seq(Color means_host[], int N_colors, Color new_means[], int counts[]) {
int i;
for (i = 0; i < N_colors; ++i) {
//Turn 0/0 into 0/1 to avoid zero division.
if(counts[i] == 0) counts[i] = 1;
means_host[i].r = new_means[i].r / counts[i];
means_host[i].g = new_means[i].g / counts[i];
means_host[i].b = new_means[i].b / counts[i];
}
}
__global__ void divide_sums_by_counts_par(Color means_device[], int N_colors, Color new_means[], int counts[]) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N_colors) {
//Turn 0/0 into 0/1 to avoid zero division.
if(counts[id] == 0) counts[id] = 1;
means_device[id].r = new_means[id].r / counts[id];
means_device[id].g = new_means[id].g / counts[id];
means_device[id].b = new_means[id].b / counts[id];
}
}
void sum_up_and_count_points_seq(Color new_means[], int assigns[], unsigned char *im, int counts[], int Size_row, int Size) {
int i;
for (i = 0; i < Size; ++i) {
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
int imeans = assigns[i];
new_means[imeans].r += im[index+2];
new_means[imeans].g += im[index+1];
new_means[imeans].b += im[index];
counts[imeans] += 1;
}
}
__global__ void matrix_reduction_color(Color new_means[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors, int offset) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
if (j == assigns[id]) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
shared[tid*N_colors + j] = im[index+offset];
}
else {
shared[tid*N_colors + j] = 0;
}
}
__syncthreads();
//reduction
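//tree reduction in shared memory: each step halves the number of active threads and
//adds the upper half of the per-thread histograms into the lower half, so thread 0
//ends up holding the block-wide sum for every one of the N_colors bins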
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) new_means[blockIdx.x*N_colors + j].r = shared[j];
else if (offset == 1) new_means[blockIdx.x*N_colors + j].g = shared[j];
else new_means[blockIdx.x*N_colors + j].b = shared[j];
}
}
}
__global__ void matrix_reduction_color_2(Color new_means_2[], Color new_means[], int Size_row, int Size, int N_colors, int offset) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) shared[tid*N_colors + j] = new_means[id*N_colors + j].r + new_means[blockDim.x*N_colors + id *N_colors + j].r;
else if (offset == 1) shared[tid*N_colors + j] = new_means[id*N_colors + j].g + new_means[blockDim.x*N_colors + id * N_colors + j].g;
else shared[tid*N_colors + j] = new_means[id*N_colors + j].b + new_means[blockDim.x*N_colors + id *N_colors + j].b;
}
__syncthreads();
//reduction
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) new_means_2[blockIdx.x*N_colors + j].r = shared[j];
else if (offset == 1) new_means_2[blockIdx.x*N_colors + j].g = shared[j];
else new_means_2[blockIdx.x*N_colors + j].b = shared[j];
}
}
}
__global__ void matrix_reduction_count(int counts[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
if (j == assigns[id]) {
shared[tid*N_colors + j] = 1;
}
else {
shared[tid*N_colors + j] = 0;
}
}
__syncthreads();
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
counts[blockIdx.x*N_colors + j] = shared[j];
}
}
}
__global__ void matrix_reduction_count_2(int counts_2[], int counts[], int Size_row, int Size, int N_colors) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] = counts[id*N_colors + j] + counts[blockDim.x*N_colors + (id * N_colors) + j];
}
__syncthreads();
//reduction
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
counts_2[blockIdx.x*N_colors + j] = shared[j];
}
}
}
__global__ void sum_up_and_count_points_par(Color new_means[], int assigns[], unsigned char *im, int counts[],
int Size_row, int Size, int N_colors, int s_counts[], Color s_new_means[]) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
//initialize
for (int j = 0; j < N_colors; ++j) {
if (j == assigns[id]) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r = im[index+2];
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g = im[index+1];
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b = im[index];
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] = 1;
}
else {
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r = 0;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g = 0;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b = 0;
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] = 0;
}
}
__syncthreads();
//reduction
unsigned int s;
for(s=1; s < blockDim.x; s *= 2) {
if (tid % (2*s) == 0) {
for (int j = 0; j < N_colors; ++j) {
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r += s_new_means[(tid + s)*N_colors + j].r;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g += s_new_means[(tid + s)*N_colors + j].g;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b += s_new_means[(tid + s)*N_colors + j].b;
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] += s_counts[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
__syncthreads();
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
new_means[blockIdx.x*N_colors + j].r = s_new_means[j].r;
new_means[blockIdx.x*N_colors + j].g = s_new_means[j].g;
new_means[blockIdx.x*N_colors + j].b = s_new_means[j].b;
counts[j] = s_counts[j];
}
}
}
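// findandsum below fuses assignment and accumulation with atomicAdd on global counters;
// the host loop uses the separate matrix_reduction_* kernels instead and keeps this
// fused variant commented out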
__global__ void findandsum(Color means[],Color new_means[], int assigns[], unsigned char *im, int counts[],
int Size_row, int Size, int ncolors) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < Size) {
int j;
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[id] = assign;
atomicAdd(&new_means[assign].r, im[index+2]);
atomicAdd(&new_means[assign].g, im[index+1]);
atomicAdd(&new_means[assign].b, im[index]);
atomicAdd(&counts[assign], 1);
}
}
void assign_colors_seq(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
int i;
for (i = 0; i < Size; ++i) {
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
im[index]=means[assigns[i]].b;
im[index + 1]=means[assigns[i]].g;
im[index + 2]=means[assigns[i]].r;
}
}
__global__ void assign_colors_par(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < Size) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
im[index]=means[assigns[id]].b;
im[index + 1]=means[assigns[id]].g;
im[index + 2]=means[assigns[id]].r;
}
}
int main(int c, char *v[])
{
int N_colors;
if (c < 4 || c > 5) {
fprintf(stderr, "usage: %s ppm_file n_iterations seed n_colors\n", v[0]);
return -1;
}
else if (c == 4) N_colors = 16;
else if (c == 5) N_colors = atoi(v[4]) ? : 16;
//read image:
bmpInfoHeader infoHeader;
unsigned char *im_host = LoadBMP(v[1], &infoHeader);
//init variables:
float elapsedTime;
int N_iterations = atoi(v[2]);
int Size_row = ((infoHeader.width*3 + 3) / 4) * 4;
int width = infoHeader.width;
int height = infoHeader.height;
int Size = width * height;
//init seed
srand(atoi(v[3]));
//init grid, block, nThreads:
unsigned int nBlocks, nBlocksMeans, nThreads;
nThreads = THREADS;
nBlocks = (Size + nThreads - 1)/nThreads;
dim3 dimGrid(nBlocks, 1, 1);
dim3 dimBlock(nThreads, 1, 1);
nBlocksMeans = (N_colors + nThreads - 1)/nThreads;
dim3 dimGridMeans(nBlocksMeans, 1, 1);
//allocate HOST memory:
Color *means_host;
means_host = (Color*) malloc(N_colors*sizeof(Color));
int *counts_host;
counts_host = (int*) malloc(sizeof(int) * N_colors);
Color *means_host_red;
means_host_red = (Color*) malloc((nBlocks/(2*nThreads)) * N_colors*sizeof(Color));
int *counts_host_red;
counts_host_red = (int*) malloc((nBlocks/(2*nThreads)) * sizeof(int) * N_colors);
//initialize means:
init_means(means_host, im_host, Size_row, N_colors, Size);
//cuda events:
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//allocate DEVICE memory:
Color *means_device;
Color *new_means;
int *counts;
Color *new_means_2;
int *counts_2;
int *assigns;
unsigned char *im_device;
hipMalloc((Color**)&means_device, N_colors*sizeof(Color));
hipMalloc((Color**)&new_means, nBlocks * N_colors*sizeof(Color));
hipMalloc((int**)&counts, nBlocks * N_colors * sizeof (int));
hipMalloc((Color**)&new_means_2, (nBlocks/(2*nThreads)) * N_colors*sizeof(Color));
hipMalloc((int**)&counts_2, (nBlocks/(2*nThreads)) * N_colors * sizeof (int));
hipMalloc((int**)&assigns, Size*sizeof(int));
hipMalloc((unsigned char**)&im_device, infoHeader.imgsize* sizeof(unsigned char));
CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
//copy data to the device:
hipMemcpy(im_device, im_host, infoHeader.imgsize * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(means_device, means_host, N_colors*sizeof(Color), hipMemcpyHostToDevice);
CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
int shared_memory_size = N_colors*THREADS * sizeof(unsigned int);
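  // each of the THREADS threads keeps its own N_colors partial histogram in dynamic
  // shared memory, hence N_colors * THREADS counters per block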
//START RECORD!!
hipEventRecord(start, 0);
//run k-means:
int it;
for (it = 0; it < N_iterations; ++it) {
//set counts and new_means to 0
hipMemset (counts, 0, nBlocks * sizeof (int) * N_colors);
hipMemset (new_means, 0, nBlocks * sizeof (Color) * N_colors);
//for each pixel find the best mean.
hipLaunchKernelGGL(( find_best_mean_par), dim3(dimGrid), dim3(dimBlock), 0, 0, means_device, assigns, im_device, Size, N_colors, Size_row);
hipDeviceSynchronize();
/*
//Sum up and count points for each cluster.
sum_up_and_count_points_par<<<dimGrid, dimBlock>>>(new_means, assigns, im_device, counts, Size_row, Size, N_colors, s_counts, s_new_means);
hipDeviceSynchronize();
*/
hipLaunchKernelGGL(( matrix_reduction_count), dim3(dimGrid), dim3(dimBlock), shared_memory_size, 0, counts, assigns, im_device, Size_row, Size, N_colors);
hipLaunchKernelGGL(( matrix_reduction_color), dim3(dimGrid), dim3(dimBlock), shared_memory_size, 0, new_means, assigns, im_device, Size_row, Size, N_colors, 2);
hipLaunchKernelGGL(( matrix_reduction_color), dim3(dimGrid), dim3(dimBlock), shared_memory_size, 0, new_means, assigns, im_device, Size_row, Size, N_colors, 1);
hipLaunchKernelGGL(( matrix_reduction_color), dim3(dimGrid), dim3(dimBlock), shared_memory_size, 0, new_means, assigns, im_device, Size_row, Size, N_colors, 0);
hipDeviceSynchronize();
//do a second reduction pass
hipLaunchKernelGGL(( matrix_reduction_count_2), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, counts_2, counts, Size_row, Size, N_colors);
hipLaunchKernelGGL(( matrix_reduction_color_2), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means_2, new_means, Size_row, Size, N_colors, 2);
hipLaunchKernelGGL(( matrix_reduction_color_2), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means_2, new_means, Size_row, Size, N_colors, 1);
hipLaunchKernelGGL(( matrix_reduction_color_2), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means_2, new_means, Size_row, Size, N_colors, 0);
hipDeviceSynchronize();
hipMemcpy(means_host_red, new_means_2, (nBlocks/(2*nThreads)) * N_colors * sizeof(Color), hipMemcpyDeviceToHost);
hipMemcpy(counts_host_red, counts_2, (nBlocks/(2*nThreads)) * N_colors * sizeof(int), hipMemcpyDeviceToHost);
memset(counts_host, 0, sizeof (int) * N_colors);
memset(means_host, 0, sizeof (Color) * N_colors);
int i, j;
for (i = 0; i < nBlocks/(2*nThreads); ++i) {
for (j = 0; j < N_colors; ++j) {
counts_host[j] += counts_host_red[i*N_colors + j];
means_host[j].r += means_host_red[i*N_colors + j].r;
means_host[j].g += means_host_red[i*N_colors + j].g;
means_host[j].b += means_host_red[i*N_colors + j].b;
}
}
//at this point we have the final, fully reduced vectors
hipMemcpy(new_means, means_host, N_colors * sizeof(Color), hipMemcpyHostToDevice);
hipMemcpy(counts, counts_host, N_colors * sizeof(int), hipMemcpyHostToDevice);
/*
findandsum<<<dimGrid, dimBlock>>>(means_device,new_means, assigns, im_device, counts, Size_row, Size, N_colors);
hipDeviceSynchronize();
*/
//Divide sums by counts to get new centroids.
hipLaunchKernelGGL(( divide_sums_by_counts_par), dim3(dimGridMeans), dim3(dimBlock), 0, 0, means_device, N_colors, new_means, counts);
hipDeviceSynchronize();
}
//assign colors:
hipLaunchKernelGGL(( assign_colors_par), dim3(dimGrid), dim3(dimBlock), 0, 0, means_device, assigns, im_device, Size_row, Size);
//copy to host:
hipMemcpy(im_host, im_device, infoHeader.imgsize * sizeof(unsigned char), hipMemcpyDeviceToHost);
//STOP RECORD!!
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
//save image
SaveBMP("sortida.bmp", &infoHeader, im_host);
DisplayInfo("sortida.bmp", &infoHeader);
int bytes_read_written = 2 * infoHeader.imgsize* sizeof(unsigned char) + //read the image and copy it
N_iterations * ( //work done in each iteration:
sizeof (int) * 2 * N_colors + //read and update counts
sizeof (Color) * N_colors + //read and update means
Size * 2 * sizeof(int) + //read and update the assignments
Size * 3 * sizeof (unsigned char) //read image data
);
printf("\nQuantization CUDA\n");
printf("Image Size: %d\n", Size);
printf("nThreads: %d\n", nThreads);
printf("nBlocks: %d\n", nBlocks);
printf("Tiempo Total Versio 4 = %4.6f ms\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
//free HOST memory:
free(im_host);
free(means_host);
//free DEVICE memory:
hipFree(means_device);
hipFree(new_means);
hipFree(new_means_2);
hipFree(assigns);
hipFree(im_device);
hipFree(counts);
hipFree(counts_2);
return 0;
}
| 1243138200e6ba04508f1c452d9798dcf7aef30d.cu | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
extern "C" {
#include "bmp.h"
}
typedef struct Color {
unsigned int r, g, b;
} Color;
#define THREADS 1024
void CheckCudaError(char sms[], int line) {
cudaError_t error;
error = cudaGetLastError();
if (error) {
printf("(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(error), __FILE__, line);
exit(EXIT_FAILURE);
}
}
int square(int value) {
return value * value;
}
void display_means(Color means[], int counts[], int N_colors) {
int i;
for (i = 0; i < N_colors; ++i) {
fprintf(stderr, "mean %d: ", i);
fprintf(stderr, "r: %d, ", means[i].r);
fprintf(stderr, "g: %d, ", means[i].g);
fprintf(stderr, "b: %d, ", means[i].b);
fprintf(stderr, "count: %d\n", counts[i]);
}
fprintf(stderr, "\n");
}
void display_assigns(int assigns[], int Size) {
int i;
for (i = 0; i < Size; ++i) {
fprintf(stderr, "%d: %d\n", i, assigns[i]);
}
}
void init_means(Color means[], unsigned char *im, int Size_row, int N_colors, int Size) {
int r;
int i;
for (i = 0; i < N_colors; ++i) {
r = rand() % Size;
int index = (r*3/Size_row) * Size_row + ((r*3)%Size_row);
means[i].r = im[index+2];
means[i].g = im[index+1];
means[i].b = im[index];
}
}
void find_best_mean_seq(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
int i;
for (i = 0; i < N; ++i) {
int j;
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[i] = assign;
}
}
__global__ void find_best_mean_par(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
int j;
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[id] = assign;
}
}
void divide_sums_by_counts_seq(Color means_host[], int N_colors, Color new_means[], int counts[]) {
int i;
for (i = 0; i < N_colors; ++i) {
//Turn 0/0 into 0/1 to avoid zero division.
if(counts[i] == 0) counts[i] = 1;
means_host[i].r = new_means[i].r / counts[i];
means_host[i].g = new_means[i].g / counts[i];
means_host[i].b = new_means[i].b / counts[i];
}
}
__global__ void divide_sums_by_counts_par(Color means_device[], int N_colors, Color new_means[], int counts[]) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N_colors) {
//Turn 0/0 into 0/1 to avoid zero division.
if(counts[id] == 0) counts[id] = 1;
means_device[id].r = new_means[id].r / counts[id];
means_device[id].g = new_means[id].g / counts[id];
means_device[id].b = new_means[id].b / counts[id];
}
}
void sum_up_and_count_points_seq(Color new_means[], int assigns[], unsigned char *im, int counts[], int Size_row, int Size) {
int i;
for (i = 0; i < Size; ++i) {
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
int imeans = assigns[i];
new_means[imeans].r += im[index+2];
new_means[imeans].g += im[index+1];
new_means[imeans].b += im[index];
counts[imeans] += 1;
}
}
__global__ void matrix_reduction_color(Color new_means[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors, int offset) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
if (j == assigns[id]) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
shared[tid*N_colors + j] = im[index+offset];
}
else {
shared[tid*N_colors + j] = 0;
}
}
__syncthreads();
//reduction
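//tree reduction in shared memory: each step halves the number of active threads and
//adds the upper half of the per-thread histograms into the lower half, so thread 0
//ends up holding the block-wide sum for every one of the N_colors bins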
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) new_means[blockIdx.x*N_colors + j].r = shared[j];
else if (offset == 1) new_means[blockIdx.x*N_colors + j].g = shared[j];
else new_means[blockIdx.x*N_colors + j].b = shared[j];
}
}
}
__global__ void matrix_reduction_color_2(Color new_means_2[], Color new_means[], int Size_row, int Size, int N_colors, int offset) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) shared[tid*N_colors + j] = new_means[id*N_colors + j].r + new_means[blockDim.x*N_colors + id *N_colors + j].r;
else if (offset == 1) shared[tid*N_colors + j] = new_means[id*N_colors + j].g + new_means[blockDim.x*N_colors + id * N_colors + j].g;
else shared[tid*N_colors + j] = new_means[id*N_colors + j].b + new_means[blockDim.x*N_colors + id *N_colors + j].b;
}
__syncthreads();
//reduction
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) new_means_2[blockIdx.x*N_colors + j].r = shared[j];
else if (offset == 1) new_means_2[blockIdx.x*N_colors + j].g = shared[j];
else new_means_2[blockIdx.x*N_colors + j].b = shared[j];
}
}
}
__global__ void matrix_reduction_count(int counts[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
if (j == assigns[id]) {
shared[tid*N_colors + j] = 1;
}
else {
shared[tid*N_colors + j] = 0;
}
}
__syncthreads();
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
counts[blockIdx.x*N_colors + j] = shared[j];
}
}
}
__global__ void matrix_reduction_count_2(int counts_2[], int counts[], int Size_row, int Size, int N_colors) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] = counts[id*N_colors + j] + counts[blockDim.x*N_colors + (id * N_colors) + j];
}
__syncthreads();
//reduction
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
counts_2[blockIdx.x*N_colors + j] = shared[j];
}
}
}
__global__ void sum_up_and_count_points_par(Color new_means[], int assigns[], unsigned char *im, int counts[],
int Size_row, int Size, int N_colors, int s_counts[], Color s_new_means[]) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
//initialize
for (int j = 0; j < N_colors; ++j) {
if (j == assigns[id]) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r = im[index+2];
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g = im[index+1];
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b = im[index];
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] = 1;
}
else {
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r = 0;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g = 0;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b = 0;
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] = 0;
}
}
__syncthreads();
//reduction
unsigned int s;
for(s=1; s < blockDim.x; s *= 2) {
if (tid % (2*s) == 0) {
for (int j = 0; j < N_colors; ++j) {
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r += s_new_means[(tid + s)*N_colors + j].r;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g += s_new_means[(tid + s)*N_colors + j].g;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b += s_new_means[(tid + s)*N_colors + j].b;
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] += s_counts[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
__syncthreads();
//copy values:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
new_means[blockIdx.x*N_colors + j].r = s_new_means[j].r;
new_means[blockIdx.x*N_colors + j].g = s_new_means[j].g;
new_means[blockIdx.x*N_colors + j].b = s_new_means[j].b;
counts[j] = s_counts[j];
}
}
}
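// findandsum below fuses assignment and accumulation with atomicAdd on global counters;
// the host loop uses the separate matrix_reduction_* kernels instead and keeps this
// fused variant commented out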
__global__ void findandsum(Color means[],Color new_means[], int assigns[], unsigned char *im, int counts[],
int Size_row, int Size, int ncolors) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < Size) {
int j;
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[id] = assign;
atomicAdd(&new_means[assign].r, im[index+2]);
atomicAdd(&new_means[assign].g, im[index+1]);
atomicAdd(&new_means[assign].b, im[index]);
atomicAdd(&counts[assign], 1);
}
}
void assign_colors_seq(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
int i;
for (i = 0; i < Size; ++i) {
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
im[index]=means[assigns[i]].b;
im[index + 1]=means[assigns[i]].g;
im[index + 2]=means[assigns[i]].r;
}
}
__global__ void assign_colors_par(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < Size) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
im[index]=means[assigns[id]].b;
im[index + 1]=means[assigns[id]].g;
im[index + 2]=means[assigns[id]].r;
}
}
int main(int c, char *v[])
{
int N_colors;
if (c < 4 || c > 5) {
fprintf(stderr, "usage: %s ppm_file n_iterations seed n_colors\n", v[0]);
return -1;
}
else if (c == 4) N_colors = 16;
else if (c == 5) N_colors = atoi(v[4]) ? : 16;
//read image:
bmpInfoHeader infoHeader;
unsigned char *im_host = LoadBMP(v[1], &infoHeader);
//init variables:
float elapsedTime;
int N_iterations = atoi(v[2]);
int Size_row = ((infoHeader.width*3 + 3) / 4) * 4;
int width = infoHeader.width;
int height = infoHeader.height;
int Size = width * height;
//init seed
srand(atoi(v[3]));
//init grid, block, nThreads:
unsigned int nBlocks, nBlocksMeans, nThreads;
nThreads = THREADS;
nBlocks = (Size + nThreads - 1)/nThreads;
dim3 dimGrid(nBlocks, 1, 1);
dim3 dimBlock(nThreads, 1, 1);
nBlocksMeans = (N_colors + nThreads - 1)/nThreads;
dim3 dimGridMeans(nBlocksMeans, 1, 1);
//allocate HOST memory:
Color *means_host;
means_host = (Color*) malloc(N_colors*sizeof(Color));
int *counts_host;
counts_host = (int*) malloc(sizeof(int) * N_colors);
Color *means_host_red;
means_host_red = (Color*) malloc((nBlocks/(2*nThreads)) * N_colors*sizeof(Color));
int *counts_host_red;
counts_host_red = (int*) malloc((nBlocks/(2*nThreads)) * sizeof(int) * N_colors);
//initialize means:
init_means(means_host, im_host, Size_row, N_colors, Size);
//cuda events:
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//allocate DEVICE memory:
Color *means_device;
Color *new_means;
int *counts;
Color *new_means_2;
int *counts_2;
int *assigns;
unsigned char *im_device;
cudaMalloc((Color**)&means_device, N_colors*sizeof(Color));
cudaMalloc((Color**)&new_means, nBlocks * N_colors*sizeof(Color));
cudaMalloc((int**)&counts, nBlocks * N_colors * sizeof (int));
cudaMalloc((Color**)&new_means_2, (nBlocks/(2*nThreads)) * N_colors*sizeof(Color));
cudaMalloc((int**)&counts_2, (nBlocks/(2*nThreads)) * N_colors * sizeof (int));
cudaMalloc((int**)&assigns, Size*sizeof(int));
cudaMalloc((unsigned char**)&im_device, infoHeader.imgsize* sizeof(unsigned char));
CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
//copy data to the device:
cudaMemcpy(im_device, im_host, infoHeader.imgsize * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(means_device, means_host, N_colors*sizeof(Color), cudaMemcpyHostToDevice);
CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
int shared_memory_size = N_colors*THREADS * sizeof(unsigned int);
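  // each of the THREADS threads keeps its own N_colors partial histogram in dynamic
  // shared memory, hence N_colors * THREADS counters per block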
//START RECORD!!
cudaEventRecord(start, 0);
//run k-means:
int it;
for (it = 0; it < N_iterations; ++it) {
//set counts and new_means to 0
cudaMemset (counts, 0, nBlocks * sizeof (int) * N_colors);
cudaMemset (new_means, 0, nBlocks * sizeof (Color) * N_colors);
//for each pixel find the best mean.
find_best_mean_par<<<dimGrid, dimBlock>>>(means_device, assigns, im_device, Size, N_colors, Size_row);
cudaDeviceSynchronize();
/*
//Sum up and count points for each cluster.
sum_up_and_count_points_par<<<dimGrid, dimBlock>>>(new_means, assigns, im_device, counts, Size_row, Size, N_colors, s_counts, s_new_means);
cudaDeviceSynchronize();
*/
matrix_reduction_count<<<dimGrid, dimBlock, shared_memory_size>>>(counts, assigns, im_device, Size_row, Size, N_colors);
matrix_reduction_color<<<dimGrid, dimBlock, shared_memory_size>>>(new_means, assigns, im_device, Size_row, Size, N_colors, 2);
matrix_reduction_color<<<dimGrid, dimBlock, shared_memory_size>>>(new_means, assigns, im_device, Size_row, Size, N_colors, 1);
matrix_reduction_color<<<dimGrid, dimBlock, shared_memory_size>>>(new_means, assigns, im_device, Size_row, Size, N_colors, 0);
cudaDeviceSynchronize();
//do a second reduction pass
matrix_reduction_count_2<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(counts_2, counts, Size_row, Size, N_colors);
matrix_reduction_color_2<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(new_means_2, new_means, Size_row, Size, N_colors, 2);
matrix_reduction_color_2<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(new_means_2, new_means, Size_row, Size, N_colors, 1);
matrix_reduction_color_2<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(new_means_2, new_means, Size_row, Size, N_colors, 0);
cudaDeviceSynchronize();
cudaMemcpy(means_host_red, new_means_2, (nBlocks/(2*nThreads)) * N_colors * sizeof(Color), cudaMemcpyDeviceToHost);
cudaMemcpy(counts_host_red, counts_2, (nBlocks/(2*nThreads)) * N_colors * sizeof(int), cudaMemcpyDeviceToHost);
memset(counts_host, 0, sizeof (int) * N_colors);
memset(means_host, 0, sizeof (Color) * N_colors);
int i, j;
for (i = 0; i < nBlocks/(2*nThreads); ++i) {
for (j = 0; j < N_colors; ++j) {
counts_host[j] += counts_host_red[i*N_colors + j];
means_host[j].r += means_host_red[i*N_colors + j].r;
means_host[j].g += means_host_red[i*N_colors + j].g;
means_host[j].b += means_host_red[i*N_colors + j].b;
}
}
//at this point we have the final, fully reduced vectors
cudaMemcpy(new_means, means_host, N_colors * sizeof(Color), cudaMemcpyHostToDevice);
cudaMemcpy(counts, counts_host, N_colors * sizeof(int), cudaMemcpyHostToDevice);
/*
findandsum<<<dimGrid, dimBlock>>>(means_device,new_means, assigns, im_device, counts, Size_row, Size, N_colors);
cudaDeviceSynchronize();
*/
//Divide sums by counts to get new centroids.
divide_sums_by_counts_par<<<dimGridMeans, dimBlock>>>(means_device, N_colors, new_means, counts);
cudaDeviceSynchronize();
}
//assign colors:
assign_colors_par<<<dimGrid, dimBlock>>>(means_device, assigns, im_device, Size_row, Size);
//copy to host:
cudaMemcpy(im_host, im_device, infoHeader.imgsize * sizeof(unsigned char), cudaMemcpyDeviceToHost);
//STOP RECORD!!
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
//save image
SaveBMP("sortida.bmp", &infoHeader, im_host);
DisplayInfo("sortida.bmp", &infoHeader);
int bytes_read_written = 2 * infoHeader.imgsize* sizeof(unsigned char) + //read the image and copy it
N_iterations * ( //work done in each iteration:
sizeof (int) * 2 * N_colors + //read and update counts
sizeof (Color) * N_colors + //read and update means
Size * 2 * sizeof(int) + //read and update the assignments
Size * 3 * sizeof (unsigned char) //read image data
);
printf("\nQuantization CUDA\n");
printf("Image Size: %d\n", Size);
printf("nThreads: %d\n", nThreads);
printf("nBlocks: %d\n", nBlocks);
printf("Tiempo Total Versio 4 = %4.6f ms\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//free HOST memory:
free(im_host);
free(means_host);
//free DEVICE memory:
cudaFree(means_device);
cudaFree(new_means);
cudaFree(new_means_2);
cudaFree(assigns);
cudaFree(im_device);
cudaFree(counts);
cudaFree(counts_2);
return 0;
}
|
6a173b28635429921021bc97c492ca7cde3d18af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "roi_align_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
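  // w1..w4 are the standard bilinear weights of the four neighbouring pixels; they are
  // non-negative and sum to 1, so val below is a convex combination of v1..v4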
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
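      // (ix, iy) walk a roi_bin_grid_w x roi_bin_grid_h sub-grid inside this output bin;
      // the + .5f term places each sample at the centre of its sub-cell before the
      // bilinear lookup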
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
template <>
bool RoIAlignOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
if (R.size() == 0) {
// Handle empty rois
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// The following mutable_data calls are needed to allocate the tensors
Y->mutable_data<float>();
return true;
}
assert(sampling_ratio_ >= 0);
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
int output_size = Y->size();
hipLaunchKernelGGL(( RoIAlignForward<float>)
, dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
Y->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlign, RoIAlignOp<float, CUDAContext>);
} // namespace caffe2 | 6a173b28635429921021bc97c492ca7cde3d18af.cu | #include "roi_align_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
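  // w1..w4 are the standard bilinear weights of the four neighbouring pixels; they are
  // non-negative and sum to 1, so val below is a convex combination of v1..v4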
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
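      // (ix, iy) walk a roi_bin_grid_w x roi_bin_grid_h sub-grid inside this output bin;
      // the + .5f term places each sample at the centre of its sub-cell before the
      // bilinear lookup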
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
template <>
bool RoIAlignOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
if (R.size() == 0) {
// Handle empty rois
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// The following mutable_data calls are needed to allocate the tensors
Y->mutable_data<float>();
return true;
}
assert(sampling_ratio_ >= 0);
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
int output_size = Y->size();
RoIAlignForward<float>
<<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
Y->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlign, RoIAlignOp<float, CUDAContext>);
} // namespace caffe2 |
83486581ab71d45ec38fa50c21a72f6ee88928d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "cuda_util_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
* Fills a floating-point array with ones.
*
* @param vec The array to fill.
* @param size The number of elements in the array.
*/
__global__ void FillOnes(float *vec, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = 1.0f;
}
float_t* onevec() {
return nullptr;
} | 83486581ab71d45ec38fa50c21a72f6ee88928d1.cu | #include <cuda_runtime.h>
#include <cublas_v2.h>
#include "cuda_util_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
* Fills a floating-point array with ones.
*
* @param vec The array to fill.
* @param size The number of elements in the array.
*/
__global__ void FillOnes(float *vec, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = 1.0f;
}
float_t* onevec() {
return nullptr;
} |
0324b09aaecaa22a0c5576eb7b7982f74d26f7fd.hip | // !!! This is a file automatically generated by hipify!!!
// ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Parallel Systems.
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:[email protected]
//
// Example of CUDA Technology Usage with unified memory.
// Image transformation from RGB to BW schema.
//
// ***********************************************************************
#include <stdio.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime.h>
#include "pic_type.h"
// Demo kernel to create chess board
__global__ void kernel_chessboard( CudaPic t_color_pic )
{
// X,Y coordinates and check image dimensions
int l_y = blockDim.y * blockIdx.y + threadIdx.y;
int l_x = blockDim.x * blockIdx.x + threadIdx.x;
if ( l_y >= t_color_pic.m_size.y ) return;
if ( l_x >= t_color_pic.m_size.x ) return;
unsigned char b_or_w = 255 * ( ( blockIdx.x + blockIdx.y ) & 1 );
// Store point into image
//t_color_pic.m_p_uchar3[ l_y * t_color_pic.m_size.x + l_x ] = { b_or_w, b_or_w, b_or_w };
//t_color_pic.m_p_uchar3[t_color_pic.at3(l_y,l_x)] = {b_or_w, b_or_w, b_or_w };
t_color_pic.at3(l_y,l_x) = {b_or_w, b_or_w, b_or_w };
}
void cu_create_chessboard( CudaPic t_color_pic, int t_square_size )
{
hipError_t l_cerr;
// Grid creation, size of grid must be equal or greater than images
dim3 l_blocks( ( t_color_pic.m_size.x + t_square_size - 1 ) / t_square_size,
( t_color_pic.m_size.y + t_square_size - 1 ) / t_square_size );
dim3 l_threads( t_square_size, t_square_size );
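    // (size + square - 1) / square is ceiling division, so the grid covers the image even
    // when its dimensions are not multiples of the square size; each block paints one
    // t_square_size x t_square_size chess square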
hipLaunchKernelGGL(( kernel_chessboard), dim3(l_blocks), dim3(l_threads) , 0, 0, t_color_pic );
if ( ( l_cerr = hipGetLastError() ) != hipSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( l_cerr ) );
hipDeviceSynchronize();
}
// -----------------------------------------------------------------------------------------------
// Demo kernel to create picture with alpha channel gradient
__global__ void kernel_alphaimg( CudaPic t_color_pic, uchar3 t_color )
{
// X,Y coordinates and check image dimensions
int l_y = blockDim.y * blockIdx.y + threadIdx.y;
int l_x = blockDim.x * blockIdx.x + threadIdx.x;
if ( l_y >= t_color_pic.m_size.y ) return;
if ( l_x >= t_color_pic.m_size.x ) return;
int l_diagonal = sqrtf( t_color_pic.m_size.x * t_color_pic.m_size.x + t_color_pic.m_size.y * t_color_pic.m_size.y );
int l_dx = l_x - t_color_pic.m_size.x / 2;
int l_dy = l_y - t_color_pic.m_size.y / 2;
int l_dxy = sqrtf( l_dx * l_dx + l_dy * l_dy ) - l_diagonal / 2;
// Store point into image
//t_color_pic.m_p_uchar4[ l_y * t_color_pic.m_size.x + l_x ] =
//{ t_color.x, t_color.y, t_color.z, ( unsigned char ) ( 255 - 255 * l_dxy / ( l_diagonal / 2 ) ) };
//t_color_pic.m_p_uchar4[t_color_pic.at4(l_y, l_x)] = { t_color.x, t_color.y, t_color.z, ( unsigned char ) ( 255 - 255 * l_dxy / ( l_diagonal / 2 ) ) };
t_color_pic.at4(l_y,l_x) = { t_color.x, t_color.y, t_color.z, ( unsigned char ) ( 255 - 255 * l_dxy / ( l_diagonal / 2 ) ) };
}
void cu_create_alphaimg( CudaPic t_color_pic, uchar3 t_color )
{
hipError_t l_cerr;
// Grid creation, size of grid must be equal or greater than images
int l_block_size = 32;
dim3 l_blocks( ( t_color_pic.m_size.x + l_block_size - 1 ) / l_block_size,
( t_color_pic.m_size.y + l_block_size - 1 ) / l_block_size );
dim3 l_threads( l_block_size, l_block_size );
hipLaunchKernelGGL(( kernel_alphaimg), dim3(l_blocks), dim3(l_threads) , 0, 0, t_color_pic, t_color );
if ( ( l_cerr = hipGetLastError() ) != hipSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( l_cerr ) );
hipDeviceSynchronize();
}
// -----------------------------------------------------------------------------------------------
// Demo kernel to insert a small image with an alpha channel into a big image
__global__ void kernel_insertimage( CudaPic t_big_pic, CudaPic t_small_pic, int2 t_position )
{
// X,Y coordinates and check image dimensions
int l_y = blockDim.y * blockIdx.y + threadIdx.y;
int l_x = blockDim.x * blockIdx.x + threadIdx.x;
if ( l_y >= t_small_pic.m_size.y ) return;
if ( l_x >= t_small_pic.m_size.x ) return;
int l_by = l_y + t_position.y;
int l_bx = l_x + t_position.x;
if ( l_by >= t_big_pic.m_size.y || l_by < 0 ) return;
if ( l_bx >= t_big_pic.m_size.x || l_bx < 0 ) return;
// Get point from small image
uchar4 l_fg_bgra = t_small_pic.m_p_uchar4[ l_y * t_small_pic.m_size.x + l_x ];
uchar3 l_bg_bgr = t_big_pic.m_p_uchar3[ l_by * t_big_pic.m_size.x + l_bx ];
uchar3 l_bgr = { 0, 0, 0 };
// compose point from small and big image according to the alpha channel
l_bgr.x = l_fg_bgra.x * l_fg_bgra.w / 255 + l_bg_bgr.x * ( 255 - l_fg_bgra.w ) / 255;
l_bgr.y = l_fg_bgra.y * l_fg_bgra.w / 255 + l_bg_bgr.y * ( 255 - l_fg_bgra.w ) / 255;
l_bgr.z = l_fg_bgra.z * l_fg_bgra.w / 255 + l_bg_bgr.z * ( 255 - l_fg_bgra.w ) / 255;
// Store point into image
//t_big_pic.m_p_uchar3[ l_by * t_big_pic.m_size.x + l_bx ] = l_bgr;
//t_big_pic.m_p_uchar3[t_big_pic.at3(l_by,l_bx)] = l_bgr;
t_big_pic.at3(l_by,l_bx) = l_bgr;
}
void cu_insertimage( CudaPic t_big_pic, CudaPic t_small_pic, int2 t_position )
{
hipError_t l_cerr;
// Grid creation, size of grid must be equal or greater than images
int l_block_size = 32;
dim3 l_blocks( ( t_small_pic.m_size.x + l_block_size - 1 ) / l_block_size,
( t_small_pic.m_size.y + l_block_size - 1 ) / l_block_size );
dim3 l_threads( l_block_size, l_block_size );
hipLaunchKernelGGL(( kernel_insertimage), dim3(l_blocks), dim3(l_threads) , 0, 0, t_big_pic, t_small_pic, t_position );
if ( ( l_cerr = hipGetLastError() ) != hipSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( l_cerr ) );
hipDeviceSynchronize();
}
| 0324b09aaecaa22a0c5576eb7b7982f74d26f7fd.cu | // ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Parallel Systems.
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:[email protected]
//
// Example of CUDA Technology Usage with unified memory.
// Image transformation from RGB to BW schema.
//
// ***********************************************************************
#include <stdio.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime.h>
#include "pic_type.h"
// Demo kernel to create chess board
__global__ void kernel_chessboard( CudaPic t_color_pic )
{
// X,Y coordinates and check image dimensions
int l_y = blockDim.y * blockIdx.y + threadIdx.y;
int l_x = blockDim.x * blockIdx.x + threadIdx.x;
if ( l_y >= t_color_pic.m_size.y ) return;
if ( l_x >= t_color_pic.m_size.x ) return;
unsigned char b_or_w = 255 * ( ( blockIdx.x + blockIdx.y ) & 1 );
// Store point into image
//t_color_pic.m_p_uchar3[ l_y * t_color_pic.m_size.x + l_x ] = { b_or_w, b_or_w, b_or_w };
//t_color_pic.m_p_uchar3[t_color_pic.at3(l_y,l_x)] = {b_or_w, b_or_w, b_or_w };
t_color_pic.at3(l_y,l_x) = {b_or_w, b_or_w, b_or_w };
}
void cu_create_chessboard( CudaPic t_color_pic, int t_square_size )
{
cudaError_t l_cerr;
// Grid creation, size of grid must be equal or greater than images
dim3 l_blocks( ( t_color_pic.m_size.x + t_square_size - 1 ) / t_square_size,
( t_color_pic.m_size.y + t_square_size - 1 ) / t_square_size );
dim3 l_threads( t_square_size, t_square_size );
kernel_chessboard<<< l_blocks, l_threads >>>( t_color_pic );
if ( ( l_cerr = cudaGetLastError() ) != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( l_cerr ) );
cudaDeviceSynchronize();
}
// -----------------------------------------------------------------------------------------------
// Demo kernel to create picture with alpha channel gradient
__global__ void kernel_alphaimg( CudaPic t_color_pic, uchar3 t_color )
{
// X,Y coordinates and check image dimensions
int l_y = blockDim.y * blockIdx.y + threadIdx.y;
int l_x = blockDim.x * blockIdx.x + threadIdx.x;
if ( l_y >= t_color_pic.m_size.y ) return;
if ( l_x >= t_color_pic.m_size.x ) return;
int l_diagonal = sqrtf( t_color_pic.m_size.x * t_color_pic.m_size.x + t_color_pic.m_size.y * t_color_pic.m_size.y );
int l_dx = l_x - t_color_pic.m_size.x / 2;
int l_dy = l_y - t_color_pic.m_size.y / 2;
int l_dxy = sqrtf( l_dx * l_dx + l_dy * l_dy ) - l_diagonal / 2;
// Store point into image
//t_color_pic.m_p_uchar4[ l_y * t_color_pic.m_size.x + l_x ] =
//{ t_color.x, t_color.y, t_color.z, ( unsigned char ) ( 255 - 255 * l_dxy / ( l_diagonal / 2 ) ) };
//t_color_pic.m_p_uchar4[t_color_pic.at4(l_y, l_x)] = { t_color.x, t_color.y, t_color.z, ( unsigned char ) ( 255 - 255 * l_dxy / ( l_diagonal / 2 ) ) };
t_color_pic.at4(l_y,l_x) = { t_color.x, t_color.y, t_color.z, ( unsigned char ) ( 255 - 255 * l_dxy / ( l_diagonal / 2 ) ) };
}
void cu_create_alphaimg( CudaPic t_color_pic, uchar3 t_color )
{
cudaError_t l_cerr;
// Grid creation, size of grid must be equal or greater than images
int l_block_size = 32;
dim3 l_blocks( ( t_color_pic.m_size.x + l_block_size - 1 ) / l_block_size,
( t_color_pic.m_size.y + l_block_size - 1 ) / l_block_size );
dim3 l_threads( l_block_size, l_block_size );
kernel_alphaimg<<< l_blocks, l_threads >>>( t_color_pic, t_color );
if ( ( l_cerr = cudaGetLastError() ) != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( l_cerr ) );
cudaDeviceSynchronize();
}
// -----------------------------------------------------------------------------------------------
// Demo kernel to insert a small picture with alpha channel into a big picture
__global__ void kernel_insertimage( CudaPic t_big_pic, CudaPic t_small_pic, int2 t_position )
{
// X,Y coordinates and check image dimensions
int l_y = blockDim.y * blockIdx.y + threadIdx.y;
int l_x = blockDim.x * blockIdx.x + threadIdx.x;
if ( l_y >= t_small_pic.m_size.y ) return;
if ( l_x >= t_small_pic.m_size.x ) return;
int l_by = l_y + t_position.y;
int l_bx = l_x + t_position.x;
if ( l_by >= t_big_pic.m_size.y || l_by < 0 ) return;
if ( l_bx >= t_big_pic.m_size.x || l_bx < 0 ) return;
// Get point from small image
uchar4 l_fg_bgra = t_small_pic.m_p_uchar4[ l_y * t_small_pic.m_size.x + l_x ];
uchar3 l_bg_bgr = t_big_pic.m_p_uchar3[ l_by * t_big_pic.m_size.x + l_bx ];
uchar3 l_bgr = { 0, 0, 0 };
// compose point from small and big image according alpha channel
l_bgr.x = l_fg_bgra.x * l_fg_bgra.w / 255 + l_bg_bgr.x * ( 255 - l_fg_bgra.w ) / 255;
l_bgr.y = l_fg_bgra.y * l_fg_bgra.w / 255 + l_bg_bgr.y * ( 255 - l_fg_bgra.w ) / 255;
l_bgr.z = l_fg_bgra.z * l_fg_bgra.w / 255 + l_bg_bgr.z * ( 255 - l_fg_bgra.w ) / 255;
// Store point into image
//t_big_pic.m_p_uchar3[ l_by * t_big_pic.m_size.x + l_bx ] = l_bgr;
//t_big_pic.m_p_uchar3[t_big_pic.at3(l_by,l_bx)] = l_bgr;
 t_big_pic.at3(l_by,l_bx) = l_bgr;
}
void cu_insertimage( CudaPic t_big_pic, CudaPic t_small_pic, int2 t_position )
{
cudaError_t l_cerr;
// Grid creation, size of grid must be equal or greater than images
int l_block_size = 32;
dim3 l_blocks( ( t_small_pic.m_size.x + l_block_size - 1 ) / l_block_size,
( t_small_pic.m_size.y + l_block_size - 1 ) / l_block_size );
dim3 l_threads( l_block_size, l_block_size );
kernel_insertimage<<< l_blocks, l_threads >>>( t_big_pic, t_small_pic, t_position );
if ( ( l_cerr = cudaGetLastError() ) != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( l_cerr ) );
cudaDeviceSynchronize();
}
|
16851a3e89fe39b8a740c207f39d523c56d1070a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <assert.h>
//#include <time.h>
#define N 4
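// Element-wise vector addition: launched with N blocks of one thread, each block computes c[i] = a[i] + b[i].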
__global__ void MoreSums(int *a, int *b, int *c){
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
int main(void){
int *dev_a, *dev_b, *dev_c;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
hipMalloc((void**) &dev_b, size);
hipMalloc((void**)&dev_c,size);
int a[N] = {1, 2, 3, 4};//, 1, 2, 3, 4};
int b[N] = {1, 2, 3, 4};//, 1, 2, 3, 4};
int c[N] = {1, 2, 3, 4};//, 1, 2, 3, 4};
hipMemcpy(dev_a,&a,size, hipMemcpyHostToDevice);
hipMemcpy(dev_b,&b,size, hipMemcpyHostToDevice);
/*
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]); */
//MoreSums<<<N,1>>>(dev_a,dev_b,dev_c); //MODIFY: use N threads instead of blocks
//verify_kernel_with_three_args(MoreSums,N,1,dev_a,dev_b,dev_c);
verify_kernel(MoreSums,N,1,dev_a,dev_b,dev_c);
hipMemcpy(&c,dev_c,size,hipMemcpyDeviceToHost);
printf("\nThe result of the sum of a and b is:\n ");
for (int i = 0; i < N; i++){
printf("%d ", c[i]);
assert(c[i]==a[i]+b[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 16851a3e89fe39b8a740c207f39d523c56d1070a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <assert.h>
//#include <time.h>
#define N 4
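// Element-wise vector addition: launched with N blocks of one thread, each block computes c[i] = a[i] + b[i].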
__global__ void MoreSums(int *a, int *b, int *c){
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
int main(void){
int *dev_a, *dev_b, *dev_c;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**)&dev_c,size);
int a[N] = {1, 2, 3, 4};//, 1, 2, 3, 4};
int b[N] = {1, 2, 3, 4};//, 1, 2, 3, 4};
int c[N] = {1, 2, 3, 4};//, 1, 2, 3, 4};
cudaMemcpy(dev_a,&a,size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,&b,size, cudaMemcpyHostToDevice);
/*
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]); */
//MoreSums<<<N,1>>>(dev_a,dev_b,dev_c); //MODIFY: use N threads instead of blocks
//verify_kernel_with_three_args(MoreSums,N,1,dev_a,dev_b,dev_c);
verify_kernel(MoreSums,N,1,dev_a,dev_b,dev_c);
cudaMemcpy(&c,dev_c,size,cudaMemcpyDeviceToHost);
printf("\nThe result of the sum of a and b is:\n ");
for (int i = 0; i < N; i++){
printf("%d ", c[i]);
assert(c[i]==a[i]+b[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
e9d2cb8c2c2c7238b1fbd9f399f4f5e48489cd41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FpsDisplay.h"
#include <stdexcept>
using std::runtime_error;
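// Stamps one digit glyph into the pixel buffer: thread coordinates are mirrored in both axes and
// every non-zero glyph texel overwrites the corresponding pixel with a fixed colour.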
__global__ static void drawDigitPixel(char* devDigits, uint2 digitDim, size_t digit, uint2 imageDim, uchar4* pixels)
{
size_t x = threadIdx.x + blockIdx.x * blockDim.x;
size_t y = threadIdx.y + blockIdx.y * blockDim.y;
size_t imageWidth = imageDim.x;
size_t imageHeight = imageDim.y;
size_t offset = (imageHeight - y - 1) * imageWidth + imageWidth - x - 1;
if (devDigits[digitDim.x - x - 1 + y * digitDim.x + digit * digitDim.x * digitDim.y] != 0)
{
pixels[offset].x = 255;
pixels[offset].y = 0;
pixels[offset].z = 0;
}
}
__global__ static void drawPointPixel(char* devPoint, uint2 pointDim, uint2 imageDim, uchar4* pixels)
{
size_t x = threadIdx.x + blockIdx.x * blockDim.x;
size_t y = threadIdx.y + blockIdx.y * blockDim.y;
size_t imageWidth = imageDim.x;
size_t imageHeight = imageDim.y;
size_t offset = (imageHeight - y - 1) * imageWidth + imageWidth - x - 1;
if (devPoint[x + y * pointDim.x] != 0)
{
pixels[offset].x = 255;
pixels[offset].y = 0;
pixels[offset].z = 0;
}
}
FpsDisplay::FpsDisplay(uint2 imageDim) :
m_imageDim{ imageDim.x, imageDim.y }
{
hipError_t error;
error = hipMalloc((void**)&m_devDigits, NumberOfDigits * DigitHeight * DigitWidth * sizeof(char));
if (error != hipSuccess)
throw runtime_error(makeCudaErrorMessage("hipMalloc", error, __FILE__, __LINE__));
error = hipMemcpy(m_devDigits, m_digits, NumberOfDigits * DigitHeight * DigitWidth * sizeof(char), hipMemcpyHostToDevice);
if (error != hipSuccess)
throw runtime_error(makeCudaErrorMessage("hipMemcpy", error, __FILE__, __LINE__));
error = hipMalloc((void**)&m_devPoint, DigitHeight * PointWidth * sizeof(char));
if (error != hipSuccess)
throw runtime_error(makeCudaErrorMessage("hipMalloc", error, __FILE__, __LINE__));
error = hipMemcpy(m_devPoint, m_point, DigitHeight * PointWidth * sizeof(char), hipMemcpyHostToDevice);
if (error != hipSuccess)
throw runtime_error(makeCudaErrorMessage("hipMemcpy", error, __FILE__, __LINE__));
}
void FpsDisplay::displayFps(uchar4* pixels, float fps)
{
unsigned int fps100 = fps * 100;
size_t digit, position = 2;
digit = fps100 % 10;
drawDigit(digit, m_imageDim, pixels);
fps100 /= 10;
digit = fps100 % 10;
drawDigit(digit, m_imageDim, pixels - DigitWidth);
fps100 /= 10;
drawPoint(m_imageDim, pixels - 2 * DigitWidth);
while (fps100 != 0)
{
 digit = fps100 % 10;
drawDigit(digit, m_imageDim, pixels - position * DigitWidth - PointWidth);
fps100 /= 10;
position++;
}
hipDeviceSynchronize();
}
void FpsDisplay::drawDigit(size_t digit, uint2 imageDim, uchar4* pixels)
{
dim3 threads(DigitWidth, DigitHeight);
uint2 digitDim;
digitDim.x = DigitWidth;
digitDim.y = DigitHeight;
drawDigitPixel << <1, threads >> > (m_devDigits, digitDim, digit, imageDim, pixels);
}
void FpsDisplay::drawPoint(uint2 imageDim, uchar4* pixels)
{
dim3 threads(PointWidth, DigitHeight);
uint2 pointDim;
pointDim.x = PointWidth;
pointDim.y = DigitHeight;
drawPointPixel << <1, threads >> > (m_devPoint, pointDim, imageDim, pixels);
}
| e9d2cb8c2c2c7238b1fbd9f399f4f5e48489cd41.cu | #include "FpsDisplay.h"
#include <stdexcept>
using std::runtime_error;
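// Stamps one digit glyph into the pixel buffer: thread coordinates are mirrored in both axes and
// every non-zero glyph texel overwrites the corresponding pixel with a fixed colour.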
__global__ static void drawDigitPixel(char* devDigits, uint2 digitDim, size_t digit, uint2 imageDim, uchar4* pixels)
{
size_t x = threadIdx.x + blockIdx.x * blockDim.x;
size_t y = threadIdx.y + blockIdx.y * blockDim.y;
size_t imageWidth = imageDim.x;
size_t imageHeight = imageDim.y;
size_t offset = (imageHeight - y - 1) * imageWidth + imageWidth - x - 1;
if (devDigits[digitDim.x - x - 1 + y * digitDim.x + digit * digitDim.x * digitDim.y] != 0)
{
pixels[offset].x = 255;
pixels[offset].y = 0;
pixels[offset].z = 0;
}
}
__global__ static void drawPointPixel(char* devPoint, uint2 pointDim, uint2 imageDim, uchar4* pixels)
{
size_t x = threadIdx.x + blockIdx.x * blockDim.x;
size_t y = threadIdx.y + blockIdx.y * blockDim.y;
size_t imageWidth = imageDim.x;
size_t imageHeight = imageDim.y;
size_t offset = (imageHeight - y - 1) * imageWidth + imageWidth - x - 1;
if (devPoint[x + y * pointDim.x] != 0)
{
pixels[offset].x = 255;
pixels[offset].y = 0;
pixels[offset].z = 0;
}
}
FpsDisplay::FpsDisplay(uint2 imageDim) :
m_imageDim{ imageDim.x, imageDim.y }
{
cudaError error;
error = cudaMalloc((void**)&m_devDigits, NumberOfDigits * DigitHeight * DigitWidth * sizeof(char));
if (error != cudaSuccess)
throw runtime_error(makeCudaErrorMessage("cudaMalloc", error, __FILE__, __LINE__));
error = cudaMemcpy(m_devDigits, m_digits, NumberOfDigits * DigitHeight * DigitWidth * sizeof(char), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
throw runtime_error(makeCudaErrorMessage("cudaMemcpy", error, __FILE__, __LINE__));
error = cudaMalloc((void**)&m_devPoint, DigitHeight * PointWidth * sizeof(char));
if (error != cudaSuccess)
throw runtime_error(makeCudaErrorMessage("cudaMalloc", error, __FILE__, __LINE__));
error = cudaMemcpy(m_devPoint, m_point, DigitHeight * PointWidth * sizeof(char), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
throw runtime_error(makeCudaErrorMessage("cudaMemcpy", error, __FILE__, __LINE__));
}
void FpsDisplay::displayFps(uchar4* pixels, float fps)
{
unsigned int fps100 = fps * 100;
size_t digit, position = 2;
digit = fps100 % 10;
drawDigit(digit, m_imageDim, pixels);
fps100 /= 10;
digit = fps100 % 10;
drawDigit(digit, m_imageDim, pixels - DigitWidth);
fps100 /= 10;
drawPoint(m_imageDim, pixels - 2 * DigitWidth);
while (fps100 != 0)
{
 digit = fps100 % 10;
drawDigit(digit, m_imageDim, pixels - position * DigitWidth - PointWidth);
fps100 /= 10;
position++;
}
cudaDeviceSynchronize();
}
void FpsDisplay::drawDigit(size_t digit, uint2 imageDim, uchar4* pixels)
{
dim3 threads(DigitWidth, DigitHeight);
uint2 digitDim;
digitDim.x = DigitWidth;
digitDim.y = DigitHeight;
drawDigitPixel << <1, threads >> > (m_devDigits, digitDim, digit, imageDim, pixels);
}
void FpsDisplay::drawPoint(uint2 imageDim, uchar4* pixels)
{
dim3 threads(PointWidth, DigitHeight);
uint2 pointDim;
pointDim.x = PointWidth;
pointDim.y = DigitHeight;
drawPointPixel << <1, threads >> > (m_devPoint, pointDim, imageDim, pixels);
}
|
a064626e27fd44a17ab479e9eeb3dca60fb87b1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "FinalSolver.hpp"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "CudaHelpers.hpp"
#include "CudaOperatorReplacer.hpp"
#include "SLESolver.hpp"
#define GRAVY_CONST 6.673
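// Linearised minimal-error iteration for the inverse gravimetry problem: Z is updated along
// S = A'(Z)^T (A(Z) - F) with step gamma = alpha * ||A(Z) - F||^2 / ||S||^2 until the relative
// residual ||A(Z) - F|| / ||F|| falls below the requested precision.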
GridData LightLinearisedMinimalError(Task task, float alpha)
{
GridParameters gp = task.grid.GetGridParameters();
int M = gp.NX * gp.NY;
Matrix A(M, 1), F(M, 1), Z(M, 1);
Z.Fill(task.initialZ);
task.grid.FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(task);
float *devF;
float *devA;
float *devZ;
float *devTmp1;
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devZ));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devTmp1));
printf("Solving with Linearised Minimal Error Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectTask(Z, A, task);
error = (F - A).Norm() / F.Norm();
printf("Iteration #%d.\tError = %f.\n", iteration, error);
if (error < task.precision)
break;
CheckCublas(hipblasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
CheckCublas(hipblasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(hipblasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
//A = A - F
hipblasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(hipblasGetError());
// ||(A - F)||
float n1 = hipblasSnrm2(M, devA, 1);
//S = AnkT(A - F)
oper.CalcATX(devZ, devA, devTmp1);
// ||S||
float n2 = hipblasSnrm2(M, devTmp1, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
hipblasSaxpy(M, gamma, devTmp1, 1, devZ, 1);
hipblasGetVector(M, sizeof(float), devZ, 1, Z.elements, 1);
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
Z *= -1.0f;
GridData out(gp);
memcpy(out.data, Z.elements, M * sizeof(float));
CheckCublas(hipblasFree(devTmp1));
CheckCublas(hipblasFree(devZ));
CheckCublas(hipblasFree(devA));
CheckCublas(hipblasFree(devF));
return out;
}
GridData LightLinearisedSpeedDescent(Task task, float alpha)
{
GridParameters gp = task.grid.GetGridParameters();
int M = gp.NX * gp.NY;
Matrix A(M, 1), F(M, 1), Z(M, 1);
Z.Fill(task.initialZ);
task.grid.FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(task);
float *devF;
float *devA;
float *devZ;
float *devS;
float *devC;
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devZ));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devS));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devC));
CheckCublas(hipblasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(hipblasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
printf("Solving with Linearised Speed Descent Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectTask(Z, A, task);
CheckCublas(hipblasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
//A = A - F
hipblasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(hipblasGetError());
//---------------------Error---------------------
float err1 = hipblasSnrm2(M, devA, 1);
CheckCublas(hipblasGetError());
float err2 = hipblasSnrm2(M, devF, 1);
CheckCublas(hipblasGetError());
error = err1 / err2;
printf("Iteration #%d.\tError = %f.\n", iteration, error);
if (error < task.precision)
break;
//---------------------Error---------------------
//S = AnkT(A - F)
oper.CalcATX(devZ, devA, devS);
// C = Ank * S
oper.CalcAX(devZ, devS, devC);
// ||S||
float n1 = hipblasSnrm2(M, devS, 1);
// ||C||
float n2 = hipblasSnrm2(M, devC, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
hipblasSaxpy(M, gamma, devS, 1, devZ, 1);
hipblasGetVector(M, sizeof(float), devZ, 1, Z.elements, 1);
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
Z *= -1.0f;
GridData out(gp);
memcpy(out.data, Z.elements, M * sizeof(float));
CheckCublas(hipblasFree(devC));
CheckCublas(hipblasFree(devS));
CheckCublas(hipblasFree(devZ));
CheckCublas(hipblasFree(devA));
CheckCublas(hipblasFree(devF));
return out;
}
//GridData LightLevenbergMarkvardt(Task t, float alpha)
//{
// GridParameters gp = t.grid.GetGridParameters();
// int M = gp.NX * gp.NY;
//
// Matrix A(M, 1), F(M, 1), Z(M, 1), Ank(M, M), Tmp1(M, 1), Tmp2(M, 1);
// Z.Fill(t.initialZ);
// t.grid.FillMatrix(F);
//
// float error;
// int iteration = 1;
//
// CudaDirectSolver dslvr(gp, true);
// CudaOperatorReplacer oper(t);
//
// float *devF;
// float *devA;
// float *devZ;
// float *devTmp1;
// float *devTmp2;
// float *devTmp3;
//
// CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devF));
// CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devA));
// CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devZ));
// CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devTmp1));
// CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devTmp2));
// CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devTmp3));
//
// CheckCublas(hipblasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
// CheckCublas(hipblasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
//
// float beta = 0.01f;
// float gamma = 0.5f;
//
// printf("Solving with Levenberg-Markvardt Method\n");
//
// clock_t t0 = clock();
//
// while (true)
// {
// dslvr.SolveDirectTask(Z, A, Ank, t);
//
// CheckCublas(hipblasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
//
// //A = A - F;
// //A = A - F
// hipblasSaxpy(M, -1.0f, devF, 1, devA, 1);
// CheckCublas(hipblasGetError());
//
// //------------------------------------------
// float err1 = hipblasSnrm2(M, devA, 1);
// CheckCublas(hipblasGetError());
// float err2 = hipblasSnrm2(M, devF, 1);
// CheckCublas(hipblasGetError());
// error = err1 / err2;
//
// printf("Iteration #%d.\tError = %f.\n", iteration, error);
//
// if (error < t.precision)
// break;
// //------------------------------------------
//
// //Tmp1 = Ank * A;
// //Tmp1 = AnkT(A - F)
// oper.CalcATX(devZ, devA, devTmp1);
//
// //Tmp *= -gamma;
//
// hipblasSscal(M, -gamma, devTmp1, 1);
// CheckCublas(hipblasGetError());
//
// //Z *= beta;
// //Z += Tmp1;
// hipblasSaxpy(M, beta, devZ, 1, devTmp1, 1);
// CheckCublas(hipblasGetError());
//
// //Tmp2 = Ank * Z;
// //Z = Ank.Transpose() * Tmp2;
// oper.CalcAX(devZ, devZ, devTmp2);
// oper.CalcATX(devZ, devTmp2, devTmp3);
//
// hipblasSaxpy(M, 1.0f, devTmp3, 1, devTmp1, 1);
// //From here devTmp1 holds the entire right-hand side
//
// //******************************Solving the linear system*************************************************
// // devTmp2 stands for Z, devTmp1 for the right-hand side
//
//
// //**********************************************************************************************
//
// hipblasGetVector(M, sizeof(float), devZ, 1, Z.elements, 1);
//
// iteration++;
// }
//
// printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
// Z *= -1.0f;
// GridData out(gp);
// memcpy(out.data, Z.elements, M * sizeof(float));
//
// CheckCublas(hipblasFree(devTmp3));
// CheckCublas(hipblasFree(devTmp2));
// CheckCublas(hipblasFree(devTmp1));
// CheckCublas(hipblasFree(devZ));
// CheckCublas(hipblasFree(devA));
// CheckCublas(hipblasFree(devF));
//
// return out;
//}
//void CublasSolveSLE(float *devA, float *devz, float *devb, int M)
//{
// float *devzpp;
// float *devrk;
// float *devArk;
// float *devzp;
// float *devpk;
// float *devApk;
// float *devTmp;
// cublasStatus state;
// float normb = hipblasSnrm2(M, devb, 1);
//
// state = hipblasAlloc(M, sizeof(float), (void**)&devzpp);
// hipblasScopy(M, devz, 1, devzpp, 1);
// state = hipblasGetError();
//
// state = hipblasAlloc(M, sizeof(float), (void**)&devrk);
// hipblasScopy(M, devb, 1, devrk, 1);
// state = hipblasGetError();
//
// hipblasSgemv('T', M, M, 1.0f, devA, M, devzpp, 1, -1.0f, devrk, 1);
// state = hipblasGetError();
//
// float normr = hipblasSnrm2(M, devrk, 1);
//
// state = hipblasAlloc(M, sizeof(float), (void**)&devArk);
// hipblasSgemv('T', M, M, 1.0f, devA, M, devrk, 1, 0.0f, devArk, 1);
// state = hipblasGetError();
//
// float d = hipblasSdot(M, devArk, 1, devrk, 1);
// state = hipblasGetError();
//
// state = hipblasAlloc(M, sizeof(float), (void**)&devzp);
// hipblasScopy(M, devzpp, 1, devzp, 1);
// state = hipblasGetError();
//
// hipblasSaxpy(M, - (normr * normr / d), devrk, 1, devzp, 1);
// state = hipblasGetError();
//
// state = hipblasAlloc(M, sizeof(float), (void**)&devpk);
// state = hipblasAlloc(M, sizeof(float), (void**)&devApk);
// state = hipblasAlloc(M, sizeof(float), (void**)&devTmp);
//
// int flag = 1;
// int iterations = 1;
//
// while (flag == 1)
// {
// hipblasScopy(M, devb, 1, devrk, 1);
// state = hipblasGetError();
// hipblasSgemv('T', M, M, 1.0f, devA, M, devzp, 1, -1.0f, devrk, 1);
// state = hipblasGetError();
//
// normr = hipblasSnrm2(M, devrk, 1);
// state = hipblasGetError();
//
// hipblasScopy(M, devzp, 1, devpk, 1);
// state = hipblasGetError();
//
// hipblasSaxpy(M, -1.0f, devzpp, 1, devpk, 1);
// state = hipblasGetError();
//
// hipblasSgemv('T', M, M, 1.0f, devA, M, devrk, 1, 0.0f, devArk, 1);
// state = hipblasGetError();
// hipblasSgemv('T', M, M, 1.0f, devA, M, devpk, 1, 0.0f, devApk, 1);
// state = hipblasGetError();
//
// float dot1 = hipblasSdot(M, devArk, 1, devpk, 1);
// state = hipblasGetError();
// float dot2 = hipblasSdot(M, devrk, 1, devpk, 1);
// state = hipblasGetError();
// float dot3 = hipblasSdot(M, devArk, 1, devrk, 1);
// state = hipblasGetError();
// float dot4 = hipblasSdot(M, devApk, 1, devpk, 1);
// state = hipblasGetError();
//
// d = dot3 * dot4 - dot1 * dot1;
//
// float gamma = ((normr * normr) * dot4 - dot2 * dot1) / d;
// float beta = ((normr * normr) * dot1 - dot2 * dot3) / d;
//
// hipblasScopy(M, devzp, 1, devzpp, 1);
// state = hipblasGetError();
//
// hipblasSaxpy(M, -gamma, devrk, 1, devzp, 1);
// state = hipblasGetError();
// hipblasSaxpy(M, beta, devpk, 1, devzp, 1);
// state = hipblasGetError();
//
// hipblasScopy(M, devb, 1, devTmp, 1);
// state = hipblasGetError();
//
// hipblasSgemv('T', M, M, 1.0f, devA, M, devzp, 1, -1.0f, devTmp, 1);
// state = hipblasGetError();
//
// double norm = hipblasSnrm2(M, devTmp, 1);
// state = hipblasGetError();
//
// double error = norm / normb;
//
// printf(" Iteration:%d\terror:%f\n", iterations, error);
//
// if (error < 0.001)
// flag = 0;
//
// iterations++;
// }
//
// hipblasFree(devzp);
// hipblasFree(devzpp);
// hipblasFree(devArk);
// hipblasFree(devApk);
// hipblasFree(devrk);
// hipblasFree(devpk);
// hipblasFree(devTmp);
//
// return;
//}
GridData LightLevenbergMarkvardt(Task t, float alpha)
{
GridParameters gp = t.grid.GetGridParameters();
DirectSolver dslvr(gp);
int M = gp.NX * gp.NY;
int i, j, l, k;
Matrix F(M, 1), A(M, 1), Z(M, 1), TMP1(M, 1), TMP2(M, 1);
Matrix Ank(M, M), Bnk(M, M);
Matrix exactZ(M, 1);
exactZ *= -1.0f;
t.grid.FillMatrix(F);
F *= -1.0f;
t.exactSolution.FillMatrix(exactZ);
Z.Fill(t.initialZ);
hipblasStatus_t state;
float *devA;
float *devAnk;
float *devF;
float *devZ;
float *devTmpV1;
float *devB;
state = hipblasAlloc(M, sizeof(float), (void**)&devA);
state = hipblasAlloc(M * M, sizeof(float), (void**)&devAnk);
state = hipblasAlloc(M, sizeof(float), (void**)&devF);
state = hipblasAlloc(M, sizeof(float), (void**)&devZ);
state = hipblasAlloc(M, sizeof(float), (void**)&devTmpV1);
state = hipblasAlloc(M * M, sizeof(float), (void**)&devB);
float beta = 0.01f;
float gamma = 0.5f;
int iteration = 1;
while (true)
{
dslvr.SolveDirectTask(Z, A, Ank, t);
//A *= -1.0f;
state = hipblasSetVector(M, sizeof(float), A.elements, 1, devA, 1);
state = hipblasSetMatrix(M, M, sizeof(float), Ank.elements, M, devAnk, M);
state = hipblasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1);
state = hipblasSetVector(M, sizeof(float), F.elements, 1, devF, 1);
float err = (Z + exactZ).Norm() / exactZ.Norm();
float u = (A + F).Norm() / F.Norm();
cout << "Levenberg-Markvardt error = " << u << " 2 = " << err << endl;
if (u < t.precision)
{
break;
}
//A - F
hipblasScopy(M, devA, 1, devTmpV1, 1);
state = hipblasGetError();
hipblasSaxpy(M, 1.0f, devF, 1, devTmpV1, 1);
state = hipblasGetError();
hipblasScopy(M, devTmpV1, 1, devF, 1);
state = hipblasGetError();
hipblasSgemv('N', M, M, gamma, devAnk, M, devF, 1, 0.0f, devTmpV1, 1);
state = hipblasGetError();
hipblasScopy(M, devTmpV1, 1, devF, 1);
state = hipblasGetError();
hipblasSgemm('N', 'T', M, M, M, 1.0f, devAnk, M, devAnk, M, 0.0f, devB, M);
state = hipblasGetError();
hipblasSaxpy(M, beta, devZ, 1, devF, 1);
state = hipblasGetError();
hipblasSgemv('T', M, M, 1.0f, devB, M, devZ, 1, 1.0f, devF, 1);
state = hipblasGetError();
state = hipblasGetVector(M, sizeof(float), devF, 1, TMP2.elements, 1);
state = hipblasGetMatrix(M, M, sizeof(float), devB, M, Ank.elements, M);
Ank.AddToDiagonal(beta);
SolveSLE(Ank, Z, TMP2, t.initialZ);
iteration++;
}
Z *= -1.0f;
GridData result(gp);
memcpy(result.data, Z.elements, M * sizeof(float));
return result;
}
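// Element-wise damping: multiplies each component of the vector by its precomputed weight.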
__global__ void Dempfers(float *Z, float *dempf, int N)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N)
return;
Z[index] *= dempf[index];
}
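// Multilayer variant: the depths of all L layers are stacked into a single unknown vector and each
// layer's update is damped by weights derived from the magnitude of its observed field.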
vector<GridData> LightMultilayerLinearisedMinimalError(MultilayerTask mTask, float alpha)
{
GridParameters gp = mTask.GetGeneralGridParameters();
int M = gp.NX * gp.NY;
int L = mTask.GetLayersCount();
Matrix A(M, 1), F(M, 1), TMP1(L * M, 1), Z(L * M, 1), Ze(L * M, 1);
mTask.InitZ(Z);
mTask.GetGeneralField().FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(mTask);
float *devF;
float *devA;
float *devZ;
float *devS;
float *dempf;
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(hipblasAlloc(L * M, sizeof(float), (void**)&devZ));
CheckCublas(hipblasAlloc(L * M, sizeof(float), (void**)&devS));
CheckCublas(hipblasAlloc(L * M, sizeof(float), (void**)&dempf));
Matrix Demp(L * M, 1);
for (int i = 0; i < L; i++)
{
memcpy(&Demp.elements[i * M], mTask[i].grid.data, M * sizeof(float));
memcpy(&Ze.elements[i * M], mTask[i].exactSolution.data, M * sizeof(float));
}
float maxVal = -10000000.0f;
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] = pow(abs(Demp.elements[i]), 1.2f);
if (abs(Demp.elements[i]) > maxVal)
{
maxVal = Demp.elements[i];
}
}
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] /= maxVal;
}
//Only needed once
CheckCublas(hipblasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(hipblasSetVector(L * M, sizeof(float), Demp.elements, 1, dempf, 1));
CheckCublas(hipblasSetVector(L * M, sizeof(float), Z.elements, 1, devZ, 1));
printf("Solving with Linearised Minimal Error Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectMultilayerTask(mTask, Z, A);
CheckCublas(hipblasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
// A = A - F
hipblasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(hipblasGetError());
// ||(A - F)||
float n1 = hipblasSnrm2(M, devA, 1);
//---------------------Error---------------------
float err2 = hipblasSnrm2(M, devF, 1);
CheckCublas(hipblasGetError());
error = n1 / err2;
printf("Iteration #%d.\tError = %f. Zerror = %f\n", iteration, error, (Ze - Z).Norm() / Ze.Norm());
if (error < mTask[0].precision)
break;
//---------------------Error---------------------
// S = AnkT(A - F)
oper.CalcATX(devZ, devA, devS);
// ||S||
float n2 = hipblasSnrm2(L * M, devS, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
int block_size = 256;
int grid_size = L * M / block_size + (L * M % block_size != 0 ? 1 : 0);
hipLaunchKernelGGL(( Dempfers), dim3(grid_size), dim3(block_size), 0, 0, devS, dempf, L * M);
CheckCuda(hipGetLastError());
hipblasSaxpy(L * M, gamma, devS, 1, devZ, 1);
CheckCublas(hipblasGetVector(L * M, sizeof(float), devZ, 1, Z.elements, 1));
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
vector<GridData> result;
Z *= -1.0;
for (int i = 0; i < L; i++)
{
GridData layer(gp);
memcpy(layer.data, &Z.elements[i * M], M * sizeof(float));
result.push_back(layer);
}
CheckCublas(hipblasFree(dempf));
CheckCublas(hipblasFree(devS));
CheckCublas(hipblasFree(devZ));
CheckCublas(hipblasFree(devA));
CheckCublas(hipblasFree(devF));
return result;
}
vector<GridData> LightMultilayerLinearisedSpeedDescent(MultilayerTask mTask, float alpha)
{
GridParameters gp = mTask.GetGeneralGridParameters();
int M = gp.NX * gp.NY;
int L = mTask.GetLayersCount();
Matrix A(M, 1), F(M, 1), Z(L * M, 1), Ze(L * M, 1);
mTask.InitZ(Z);
mTask.GetGeneralField().FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(mTask);
float *devF;
float *devA;
float *devZ;
float *devS;
float *devC;
float *dempf;
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(hipblasAlloc(L * M, sizeof(float), (void**)&devZ));
CheckCublas(hipblasAlloc(L * M, sizeof(float), (void**)&devS));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devC));
CheckCublas(hipblasAlloc(L * M, sizeof(float), (void**)&dempf));
Matrix Demp(L * M, 1);
for (int i = 0; i < L; i++)
{
memcpy(&Demp.elements[i * M], mTask[i].grid.data, M * sizeof(float));
memcpy(&Ze.elements[i * M], mTask[i].exactSolution.data, M * sizeof(float));
}
float maxVal = -10000000.0f;
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] = pow(abs(Demp.elements[i]), 1.2f);
if (abs(Demp.elements[i]) > maxVal)
{
maxVal = Demp.elements[i];
}
}
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] /= maxVal;
}
//Only needed once
CheckCublas(hipblasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(hipblasSetVector(L * M, sizeof(float), Demp.elements, 1, dempf, 1));
CheckCublas(hipblasSetVector(L * M, sizeof(float), Z.elements, 1, devZ, 1));
printf("Solving with Linearised Speed Descent Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectMultilayerTask(mTask, Z, A);
CheckCublas(hipblasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
// A = A - F
hipblasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(hipblasGetError());
//---------------------Error---------------------
float err1 = hipblasSnrm2(M, devA, 1);
CheckCublas(hipblasGetError());
float err2 = hipblasSnrm2(M, devF, 1);
CheckCublas(hipblasGetError());
error = err1 / err2;
printf("Iteration #%d.\tError = %f. Zerror = %f\n", iteration, error, (Ze - Z).Norm() / Ze.Norm());
if (error < mTask[0].precision)
break;
//---------------------Error---------------------
// S = AnkT(A - F)
oper.CalcATX(devZ, devA, devS);
// ||S||
float n1 = hipblasSnrm2(L * M, devS, 1);
// C = Ank * S
oper.CalcAX(devZ, devS, devC);
// ||C||
float n2 = hipblasSnrm2(M, devC, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
int block_size = 256;
int grid_size = L * M / block_size + (L * M % block_size != 0 ? 1 : 0);
hipLaunchKernelGGL(( Dempfers), dim3(grid_size), dim3(block_size), 0, 0, devS, dempf, L * M);
CheckCuda(hipGetLastError());
hipblasSaxpy(L * M, gamma, devS, 1, devZ, 1);
CheckCublas(hipblasGetVector(L * M, sizeof(float), devZ, 1, Z.elements, 1));
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
vector<GridData> result;
Z *= -1.0;
for (int i = 0; i < L; i++)
{
GridData layer(gp);
memcpy(layer.data, &Z.elements[i * M], M * sizeof(float));
result.push_back(layer);
}
CheckCublas(hipblasFree(dempf));
CheckCublas(hipblasFree(devS));
CheckCublas(hipblasFree(devZ));
CheckCublas(hipblasFree(devA));
CheckCublas(hipblasFree(devF));
return result;
}
vector<GridData> LightMultilayerLevenbergMarkvardt(MultilayerTask mTask, float alpha)
{
//Definitions
GridParameters gp = mTask.GetGeneralGridParameters();
int L = mTask.GetLayersCount();
int M = gp.NX * gp.NY;
Matrix Ank(L * M, M), Ank1(M,L * M), B(M, M);
Matrix A(M, 1), F(M, 1), TMP1(M, 1), TMP2(M, 1);
Matrix Z(L * M, 1);
//Initialising of Z vector
for (int i = 0; i < L; i++)
{
for(int j = 0; j < M; j++)
{
Z.elements[i * M + j] = mTask[i].initialZ;
}
}
//Form dempfer values
Matrix Demp(L * M, 1);
for (int i = 0; i < L; i++)
{
memcpy(&Demp.elements[i * M], mTask[i].grid.data, M * sizeof(float));
}
float maxVal = -10000000.0f;
for(int i = 0; i < L * M; i++)
{
if (abs(Demp.elements[i]) > maxVal)
{
maxVal = Demp.elements[i];
}
}
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] /= maxVal;
}
//Initalising of general field matrix
mTask.GetGeneralField().FillMatrix(F);
//F *= -1.0f;
//Error value declaration
float precision = 100000000.0f;
for (int i = 0; i < L; i++)
{
if (mTask[i].precision < 0)
{
throw new string("Invalid precision value");
}
if (mTask[i].precision < precision)
{
precision = mTask[i].precision;
}
}
float *devF;
float *devA;
float *devZ;
float *devTmp1;
float *devTmp2;
float *devAnk;
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(hipblasAlloc(L * M, sizeof(float), (void**)&devZ));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devTmp1));
CheckCublas(hipblasAlloc(M, sizeof(float), (void**)&devTmp2));
CheckCublas(hipblasAlloc(M * M, sizeof(float), (void**)&devAnk));
int iteration = 1;
clock_t t0 = clock();
float beta = 0.01f;
float gamma = 0.5f;
int grid_size;
int block_size;
CudaDirectSolver dslvr(mTask.GetGeneralGridParameters(), true);
printf("Solving multilayer task with Levenberg-Markvardt Method\n");
while (true)
{
//dslvr.SolveDirectMultilayerTask(mTask, Z, A, Ank);
dslvr.SolveDirectTask(Z, A, Ank, mTask[0]);
CheckCublas(hipblasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
CheckCublas(hipblasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(hipblasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
CheckCublas(hipblasSetMatrix(M, M, sizeof(float), Ank.elements, M, devAnk, M));
//A = A - F;
hipblasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(hipblasGetError());
//Tmp1 = AT * A
block_size = 32;
grid_size = M / block_size + (M % block_size != 0 ? 1 : 0);
//KernelCalcAZ<<<block_size, grid_size>>> (M, M, devA, devTmp1, devZ, mTask[0].geltaSigm, mTask[0].asimptHeight, gp.dX, gp.dY, gp.NX, gp.NY);
float u = (A + F).Norm() / F.Norm();
cout << "Iteration: " << iteration << " Error = " << u << endl;
if (u < 0.001f || iteration == 7)
{
break;
}
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
vector<GridData> result;
Z *= -1.0;
for (int i = 0; i < L; i++)
{
GridData layer(gp);
memcpy(layer.data, &Z.elements[i * M], M * sizeof(float));
result.push_back(layer);
}
CheckCublas(hipblasFree(devAnk));
CheckCublas(hipblasFree(devTmp2));
CheckCublas(hipblasFree(devTmp1));
CheckCublas(hipblasFree(devZ));
CheckCublas(hipblasFree(devA));
CheckCublas(hipblasFree(devF));
return result;
}
| a064626e27fd44a17ab479e9eeb3dca60fb87b1c.cu | #include "FinalSolver.hpp"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "CudaHelpers.hpp"
#include "CudaOperatorReplacer.hpp"
#include "SLESolver.hpp"
#define GRAVY_CONST 6.673
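// Linearised minimal-error iteration for the inverse gravimetry problem: Z is updated along
// S = A'(Z)^T (A(Z) - F) with step gamma = alpha * ||A(Z) - F||^2 / ||S||^2 until the relative
// residual ||A(Z) - F|| / ||F|| falls below the requested precision.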
GridData LightLinearisedMinimalError(Task task, float alpha)
{
GridParameters gp = task.grid.GetGridParameters();
int M = gp.NX * gp.NY;
Matrix A(M, 1), F(M, 1), Z(M, 1);
Z.Fill(task.initialZ);
task.grid.FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(task);
float *devF;
float *devA;
float *devZ;
float *devTmp1;
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devZ));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devTmp1));
printf("Solving with Linearised Minimal Error Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectTask(Z, A, task);
error = (F - A).Norm() / F.Norm();
printf("Iteration #%d.\tError = %f.\n", iteration, error);
if (error < task.precision)
break;
CheckCublas(cublasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
CheckCublas(cublasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(cublasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
//A = A - F
cublasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(cublasGetError());
// ||(A - F)||
float n1 = cublasSnrm2(M, devA, 1);
//S = AnkT(A - F)
oper.CalcATX(devZ, devA, devTmp1);
// ||S||
float n2 = cublasSnrm2(M, devTmp1, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
cublasSaxpy(M, gamma, devTmp1, 1, devZ, 1);
cublasGetVector(M, sizeof(float), devZ, 1, Z.elements, 1);
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
Z *= -1.0f;
GridData out(gp);
memcpy(out.data, Z.elements, M * sizeof(float));
CheckCublas(cublasFree(devTmp1));
CheckCublas(cublasFree(devZ));
CheckCublas(cublasFree(devA));
CheckCublas(cublasFree(devF));
return out;
}
GridData LightLinearisedSpeedDescent(Task task, float alpha)
{
GridParameters gp = task.grid.GetGridParameters();
int M = gp.NX * gp.NY;
Matrix A(M, 1), F(M, 1), Z(M, 1);
Z.Fill(task.initialZ);
task.grid.FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(task);
float *devF;
float *devA;
float *devZ;
float *devS;
float *devC;
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devZ));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devS));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devC));
CheckCublas(cublasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(cublasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
printf("Solving with Linearised Speed Descent Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectTask(Z, A, task);
CheckCublas(cublasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
//A = A - F
cublasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(cublasGetError());
//---------------------Error---------------------
float err1 = cublasSnrm2(M, devA, 1);
CheckCublas(cublasGetError());
float err2 = cublasSnrm2(M, devF, 1);
CheckCublas(cublasGetError());
error = err1 / err2;
printf("Iteration #%d.\tError = %f.\n", iteration, error);
if (error < task.precision)
break;
//---------------------Error---------------------
//S = AnkT(A - F)
oper.CalcATX(devZ, devA, devS);
// C = Ank * S
oper.CalcAX(devZ, devS, devC);
// ||S||
float n1 = cublasSnrm2(M, devS, 1);
// ||C||
float n2 = cublasSnrm2(M, devC, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
cublasSaxpy(M, gamma, devS, 1, devZ, 1);
cublasGetVector(M, sizeof(float), devZ, 1, Z.elements, 1);
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
Z *= -1.0f;
GridData out(gp);
memcpy(out.data, Z.elements, M * sizeof(float));
CheckCublas(cublasFree(devC));
CheckCublas(cublasFree(devS));
CheckCublas(cublasFree(devZ));
CheckCublas(cublasFree(devA));
CheckCublas(cublasFree(devF));
return out;
}
//GridData LightLevenbergMarkvardt(Task t, float alpha)
//{
// GridParameters gp = t.grid.GetGridParameters();
// int M = gp.NX * gp.NY;
//
// Matrix A(M, 1), F(M, 1), Z(M, 1), Ank(M, M), Tmp1(M, 1), Tmp2(M, 1);
// Z.Fill(t.initialZ);
// t.grid.FillMatrix(F);
//
// float error;
// int iteration = 1;
//
// CudaDirectSolver dslvr(gp, true);
// CudaOperatorReplacer oper(t);
//
// float *devF;
// float *devA;
// float *devZ;
// float *devTmp1;
// float *devTmp2;
// float *devTmp3;
//
// CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devF));
// CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devA));
// CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devZ));
// CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devTmp1));
// CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devTmp2));
// CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devTmp3));
//
// CheckCublas(cublasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
// CheckCublas(cublasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
//
// float beta = 0.01f;
// float gamma = 0.5f;
//
// printf("Solving with Levenberg-Markvardt Method\n");
//
// clock_t t0 = clock();
//
// while (true)
// {
// dslvr.SolveDirectTask(Z, A, Ank, t);
//
// CheckCublas(cublasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
//
// //A = A - F;
// //A = A - F
// cublasSaxpy(M, -1.0f, devF, 1, devA, 1);
// CheckCublas(cublasGetError());
//
// //---------------------Error---------------------
// float err1 = cublasSnrm2(M, devA, 1);
// CheckCublas(cublasGetError());
// float err2 = cublasSnrm2(M, devF, 1);
// CheckCublas(cublasGetError());
// error = err1 / err2;
//
// printf("Iteration #%d.\tError = %f.\n", iteration, error);
//
// if (error < t.precision)
// break;
// //---------------------Error---------------------
//
// //Tmp1 = Ank * A;
// //Tmp1 = AnkT(A - F)
// oper.CalcATX(devZ, devA, devTmp1);
//
// //Tmp *= -gamma;
//
// cublasSscal(M, -gamma, devTmp1, 1);
// CheckCublas(cublasGetError());
//
// //Z *= beta;
// //Z += Tmp1;
// cublasSaxpy(M, beta, devZ, 1, devTmp1, 1);
// CheckCublas(cublasGetError());
//
// //Tmp2 = Ank * Z;
// //Z = Ank.Transpose() * Tmp2;
// oper.CalcAX(devZ, devZ, devTmp2);
// oper.CalcATX(devZ, devTmp2, devTmp3);
//
// cublasSaxpy(M, 1.0f, devTmp3, 1, devTmp1, 1);
// //From here devTmp1 holds the entire right-hand side
//
// //******************************Solving the linear system*************************************************
// // devTmp2 stands for Z, devTmp1 for the right-hand side
//
//
// //**********************************************************************************************
//
// cublasGetVector(M, sizeof(float), devZ, 1, Z.elements, 1);
//
// iteration++;
// }
//
// printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
// Z *= -1.0f;
// GridData out(gp);
// memcpy(out.data, Z.elements, M * sizeof(float));
//
// CheckCublas(cublasFree(devTmp3));
// CheckCublas(cublasFree(devTmp2));
// CheckCublas(cublasFree(devTmp1));
// CheckCublas(cublasFree(devZ));
// CheckCublas(cublasFree(devA));
// CheckCublas(cublasFree(devF));
//
// return out;
//}
//void CublasSolveSLE(float *devA, float *devz, float *devb, int M)
//{
// float *devzpp;
// float *devrk;
// float *devArk;
// float *devzp;
// float *devpk;
// float *devApk;
// float *devTmp;
// cublasStatus state;
// float normb = cublasSnrm2(M, devb, 1);
//
// state = cublasAlloc(M, sizeof(float), (void**)&devzpp);
// cublasScopy(M, devz, 1, devzpp, 1);
// state = cublasGetError();
//
// state = cublasAlloc(M, sizeof(float), (void**)&devrk);
// cublasScopy(M, devb, 1, devrk, 1);
// state = cublasGetError();
//
// cublasSgemv('T', M, M, 1.0f, devA, M, devzpp, 1, -1.0f, devrk, 1);
// state = cublasGetError();
//
// float normr = cublasSnrm2(M, devrk, 1);
//
// state = cublasAlloc(M, sizeof(float), (void**)&devArk);
// cublasSgemv('T', M, M, 1.0f, devA, M, devrk, 1, 0.0f, devArk, 1);
// state = cublasGetError();
//
// float d = cublasSdot(M, devArk, 1, devrk, 1);
// state = cublasGetError();
//
// state = cublasAlloc(M, sizeof(float), (void**)&devzp);
// cublasScopy(M, devzpp, 1, devzp, 1);
// state = cublasGetError();
//
// cublasSaxpy(M, - (normr * normr / d), devrk, 1, devzp, 1);
// state = cublasGetError();
//
// state = cublasAlloc(M, sizeof(float), (void**)&devpk);
// state = cublasAlloc(M, sizeof(float), (void**)&devApk);
// state = cublasAlloc(M, sizeof(float), (void**)&devTmp);
//
// int flag = 1;
// int iterations = 1;
//
// while (flag == 1)
// {
// cublasScopy(M, devb, 1, devrk, 1);
// state = cublasGetError();
// cublasSgemv('T', M, M, 1.0f, devA, M, devzp, 1, -1.0f, devrk, 1);
// state = cublasGetError();
//
// normr = cublasSnrm2(M, devrk, 1);
// state = cublasGetError();
//
// cublasScopy(M, devzp, 1, devpk, 1);
// state = cublasGetError();
//
// cublasSaxpy(M, -1.0f, devzpp, 1, devpk, 1);
// state = cublasGetError();
//
// cublasSgemv('T', M, M, 1.0f, devA, M, devrk, 1, 0.0f, devArk, 1);
// state = cublasGetError();
// cublasSgemv('T', M, M, 1.0f, devA, M, devpk, 1, 0.0f, devApk, 1);
// state = cublasGetError();
//
// float dot1 = cublasSdot(M, devArk, 1, devpk, 1);
// state = cublasGetError();
// float dot2 = cublasSdot(M, devrk, 1, devpk, 1);
// state = cublasGetError();
// float dot3 = cublasSdot(M, devArk, 1, devrk, 1);
// state = cublasGetError();
// float dot4 = cublasSdot(M, devApk, 1, devpk, 1);
// state = cublasGetError();
//
// d = dot3 * dot4 - dot1 * dot1;
//
// float gamma = ((normr * normr) * dot4 - dot2 * dot1) / d;
// float beta = ((normr * normr) * dot1 - dot2 * dot3) / d;
//
// cublasScopy(M, devzp, 1, devzpp, 1);
// state = cublasGetError();
//
// cublasSaxpy(M, -gamma, devrk, 1, devzp, 1);
// state = cublasGetError();
// cublasSaxpy(M, beta, devpk, 1, devzp, 1);
// state = cublasGetError();
//
// cublasScopy(M, devb, 1, devTmp, 1);
// state = cublasGetError();
//
// cublasSgemv('T', M, M, 1.0f, devA, M, devzp, 1, -1.0f, devTmp, 1);
// state = cublasGetError();
//
// double norm = cublasSnrm2(M, devTmp, 1);
// state = cublasGetError();
//
// double error = norm / normb;
//
// printf(" Iteration:%d\terror:%f\n", iterations, error);
//
// if (error < 0.001)
// flag = 0;
//
// iterations++;
// }
//
// cublasFree(devzp);
// cublasFree(devzpp);
// cublasFree(devArk);
// cublasFree(devApk);
// cublasFree(devrk);
// cublasFree(devpk);
// cublasFree(devTmp);
//
// return;
//}
GridData LightLevenbergMarkvardt(Task t, float alpha)
{
GridParameters gp = t.grid.GetGridParameters();
DirectSolver dslvr(gp);
int M = gp.NX * gp.NY;
int i, j, l, k;
Matrix F(M, 1), A(M, 1), Z(M, 1), TMP1(M, 1), TMP2(M, 1);
Matrix Ank(M, M), Bnk(M, M);
Matrix exactZ(M, 1);
exactZ *= -1.0f;
t.grid.FillMatrix(F);
F *= -1.0f;
t.exactSolution.FillMatrix(exactZ);
Z.Fill(t.initialZ);
cublasStatus state;
float *devA;
float *devAnk;
float *devF;
float *devZ;
float *devTmpV1;
float *devB;
state = cublasAlloc(M, sizeof(float), (void**)&devA);
state = cublasAlloc(M * M, sizeof(float), (void**)&devAnk);
state = cublasAlloc(M, sizeof(float), (void**)&devF);
state = cublasAlloc(M, sizeof(float), (void**)&devZ);
state = cublasAlloc(M, sizeof(float), (void**)&devTmpV1);
state = cublasAlloc(M * M, sizeof(float), (void**)&devB);
float beta = 0.01f;
float gamma = 0.5f;
int iteration = 1;
while (true)
{
dslvr.SolveDirectTask(Z, A, Ank, t);
//A *= -1.0f;
state = cublasSetVector(M, sizeof(float), A.elements, 1, devA, 1);
state = cublasSetMatrix(M, M, sizeof(float), Ank.elements, M, devAnk, M);
state = cublasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1);
state = cublasSetVector(M, sizeof(float), F.elements, 1, devF, 1);
float err = (Z + exactZ).Norm() / exactZ.Norm();
float u = (A + F).Norm() / F.Norm();
cout << "Levenberg-Markvardt error = " << u << " 2 = " << err << endl;
if (u < t.precision)
{
break;
}
//A - F
cublasScopy(M, devA, 1, devTmpV1, 1);
state = cublasGetError();
cublasSaxpy(M, 1.0f, devF, 1, devTmpV1, 1);
state = cublasGetError();
cublasScopy(M, devTmpV1, 1, devF, 1);
state = cublasGetError();
cublasSgemv('N', M, M, gamma, devAnk, M, devF, 1, 0.0f, devTmpV1, 1);
state = cublasGetError();
cublasScopy(M, devTmpV1, 1, devF, 1);
state = cublasGetError();
cublasSgemm('N', 'T', M, M, M, 1.0f, devAnk, M, devAnk, M, 0.0f, devB, M);
state = cublasGetError();
cublasSaxpy(M, beta, devZ, 1, devF, 1);
state = cublasGetError();
cublasSgemv('T', M, M, 1.0f, devB, M, devZ, 1, 1.0f, devF, 1);
state = cublasGetError();
state = cublasGetVector(M, sizeof(float), devF, 1, TMP2.elements, 1);
state = cublasGetMatrix(M, M, sizeof(float), devB, M, Ank.elements, M);
Ank.AddToDiagonal(beta);
SolveSLE(Ank, Z, TMP2, t.initialZ);
iteration++;
}
Z *= -1.0f;
GridData result(gp);
memcpy(result.data, Z.elements, M * sizeof(float));
return result;
}
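// Element-wise damping: multiplies each component of the vector by its precomputed weight.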
__global__ void Dempfers(float *Z, float *dempf, int N)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N)
return;
Z[index] *= dempf[index];
}
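// Multilayer variant: the depths of all L layers are stacked into a single unknown vector and each
// layer's update is damped by weights derived from the magnitude of its observed field.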
vector<GridData> LightMultilayerLinearisedMinimalError(MultilayerTask mTask, float alpha)
{
GridParameters gp = mTask.GetGeneralGridParameters();
int M = gp.NX * gp.NY;
int L = mTask.GetLayersCount();
Matrix A(M, 1), F(M, 1), TMP1(L * M, 1), Z(L * M, 1), Ze(L * M, 1);
mTask.InitZ(Z);
mTask.GetGeneralField().FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(mTask);
float *devF;
float *devA;
float *devZ;
float *devS;
float *dempf;
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(cublasAlloc(L * M, sizeof(float), (void**)&devZ));
CheckCublas(cublasAlloc(L * M, sizeof(float), (void**)&devS));
CheckCublas(cublasAlloc(L * M, sizeof(float), (void**)&dempf));
Matrix Demp(L * M, 1);
for (int i = 0; i < L; i++)
{
memcpy(&Demp.elements[i * M], mTask[i].grid.data, M * sizeof(float));
memcpy(&Ze.elements[i * M], mTask[i].exactSolution.data, M * sizeof(float));
}
float maxVal = -10000000.0f;
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] = pow(abs(Demp.elements[i]), 1.2f);
if (abs(Demp.elements[i]) > maxVal)
{
maxVal = Demp.elements[i];
}
}
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] /= maxVal;
}
//Only needed once
CheckCublas(cublasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(cublasSetVector(L * M, sizeof(float), Demp.elements, 1, dempf, 1));
CheckCublas(cublasSetVector(L * M, sizeof(float), Z.elements, 1, devZ, 1));
printf("Solving with Linearised Minimal Error Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectMultilayerTask(mTask, Z, A);
CheckCublas(cublasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
// A = A - F
cublasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(cublasGetError());
// ||(A - F)||
float n1 = cublasSnrm2(M, devA, 1);
//---------------------Error---------------------
float err2 = cublasSnrm2(M, devF, 1);
CheckCublas(cublasGetError());
error = n1 / err2;
printf("Iteration #%d.\tError = %f. Zerror = %f\n", iteration, error, (Ze - Z).Norm() / Ze.Norm());
if (error < mTask[0].precision)
break;
//---------------------Error---------------------
// S = AnkT(A - F)
oper.CalcATX(devZ, devA, devS);
// ||S||
float n2 = cublasSnrm2(L * M, devS, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
int block_size = 256;
int grid_size = L * M / block_size + (L * M % block_size != 0 ? 1 : 0);
Dempfers<<<grid_size, block_size>>>(devS, dempf, L * M);
CheckCuda(cudaGetLastError());
cublasSaxpy(L * M, gamma, devS, 1, devZ, 1);
CheckCublas(cublasGetVector(L * M, sizeof(float), devZ, 1, Z.elements, 1));
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
vector<GridData> result;
Z *= -1.0;
for (int i = 0; i < L; i++)
{
GridData layer(gp);
memcpy(layer.data, &Z.elements[i * M], M * sizeof(float));
result.push_back(layer);
}
CheckCublas(cublasFree(dempf));
CheckCublas(cublasFree(devS));
CheckCublas(cublasFree(devZ));
CheckCublas(cublasFree(devA));
CheckCublas(cublasFree(devF));
return result;
}
vector<GridData> LightMultilayerLinearisedSpeedDescent(MultilayerTask mTask, float alpha)
{
GridParameters gp = mTask.GetGeneralGridParameters();
int M = gp.NX * gp.NY;
int L = mTask.GetLayersCount();
Matrix A(M, 1), F(M, 1), Z(L * M, 1), Ze(L * M, 1);
mTask.InitZ(Z);
mTask.GetGeneralField().FillMatrix(F);
float error;
int iteration = 1;
CudaDirectSolver dslvr(gp, false);
CudaOperatorReplacer oper(mTask);
float *devF;
float *devA;
float *devZ;
float *devS;
float *devC;
float *dempf;
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(cublasAlloc(L * M, sizeof(float), (void**)&devZ));
CheckCublas(cublasAlloc(L * M, sizeof(float), (void**)&devS));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devC));
CheckCublas(cublasAlloc(L * M, sizeof(float), (void**)&dempf));
Matrix Demp(L * M, 1);
for (int i = 0; i < L; i++)
{
memcpy(&Demp.elements[i * M], mTask[i].grid.data, M * sizeof(float));
memcpy(&Ze.elements[i * M], mTask[i].exactSolution.data, M * sizeof(float));
}
float maxVal = -10000000.0f;
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] = pow(abs(Demp.elements[i]), 1.2f);
if (abs(Demp.elements[i]) > maxVal)
{
maxVal = Demp.elements[i];
}
}
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] /= maxVal;
}
//Only needed once
CheckCublas(cublasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(cublasSetVector(L * M, sizeof(float), Demp.elements, 1, dempf, 1));
CheckCublas(cublasSetVector(L * M, sizeof(float), Z.elements, 1, devZ, 1));
printf("Solving with Linearised Speed Descent Method\n");
clock_t t0 = clock();
while (true)
{
dslvr.SolveDirectMultilayerTask(mTask, Z, A);
CheckCublas(cublasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
// A = A - F
cublasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(cublasGetError());
//---------------------Error---------------------
float err1 = cublasSnrm2(M, devA, 1);
CheckCublas(cublasGetError());
float err2 = cublasSnrm2(M, devF, 1);
CheckCublas(cublasGetError());
error = err1 / err2;
printf("Iteration #%d.\tError = %f. Zerror = %f\n", iteration, error, (Ze - Z).Norm() / Ze.Norm());
if (error < mTask[0].precision)
break;
//---------------------Error---------------------
// S = AnkT(A - F)
oper.CalcATX(devZ, devA, devS);
// ||S||
float n1 = cublasSnrm2(L * M, devS, 1);
// C = Ank * S
oper.CalcAX(devZ, devS, devC);
// ||C||
float n2 = cublasSnrm2(M, devC, 1);
float gamma = (alpha * n1 * n1) / (n2 * n2);
int block_size = 256;
int grid_size = L * M / block_size + (L * M % block_size != 0 ? 1 : 0);
Dempfers<<<grid_size, block_size>>>(devS, dempf, L * M);
CheckCuda(cudaGetLastError());
cublasSaxpy(L * M, gamma, devS, 1, devZ, 1);
CheckCublas(cublasGetVector(L * M, sizeof(float), devZ, 1, Z.elements, 1));
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
vector<GridData> result;
Z *= -1.0;
for (int i = 0; i < L; i++)
{
GridData layer(gp);
memcpy(layer.data, &Z.elements[i * M], M * sizeof(float));
result.push_back(layer);
}
CheckCublas(cublasFree(dempf));
CheckCublas(cublasFree(devS));
CheckCublas(cublasFree(devZ));
CheckCublas(cublasFree(devA));
CheckCublas(cublasFree(devF));
return result;
}
vector<GridData> LightMultilayerLevenbergMarkvardt(MultilayerTask mTask, float alpha)
{
//Definitions
GridParameters gp = mTask.GetGeneralGridParameters();
int L = mTask.GetLayersCount();
int M = gp.NX * gp.NY;
Matrix Ank(L * M, M), Ank1(M,L * M), B(M, M);
Matrix A(M, 1), F(M, 1), TMP1(M, 1), TMP2(M, 1);
Matrix Z(L * M, 1);
//Initialising of Z vector
for (int i = 0; i < L; i++)
{
for(int j = 0; j < M; j++)
{
Z.elements[i * M + j] = mTask[i].initialZ;
}
}
//Form dempfer values
Matrix Demp(L * M, 1);
for (int i = 0; i < L; i++)
{
memcpy(&Demp.elements[i * M], mTask[i].grid.data, M * sizeof(float));
}
float maxVal = -10000000.0f;
for(int i = 0; i < L * M; i++)
{
if (abs(Demp.elements[i]) > maxVal)
{
maxVal = Demp.elements[i];
}
}
for(int i = 0; i < L * M; i++)
{
Demp.elements[i] /= maxVal;
}
//Initalising of general field matrix
mTask.GetGeneralField().FillMatrix(F);
//F *= -1.0f;
//Error value declaration
float precision = 100000000.0f;
for (int i = 0; i < L; i++)
{
if (mTask[i].precision < 0)
{
throw new string("Invalid precision value");
}
if (mTask[i].precision < precision)
{
precision = mTask[i].precision;
}
}
float *devF;
float *devA;
float *devZ;
float *devTmp1;
float *devTmp2;
float *devAnk;
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devF));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devA));
CheckCublas(cublasAlloc(L * M, sizeof(float), (void**)&devZ));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devTmp1));
CheckCublas(cublasAlloc(M, sizeof(float), (void**)&devTmp2));
CheckCublas(cublasAlloc(M * M, sizeof(float), (void**)&devAnk));
int iteration = 1;
clock_t t0 = clock();
float beta = 0.01f;
float gamma = 0.5f;
int grid_size;
int block_size;
CudaDirectSolver dslvr(mTask.GetGeneralGridParameters(), true);
printf("Solving multilayer task with Levenberg-Markvardt Method\n");
while (true)
{
//dslvr.SolveDirectMultilayerTask(mTask, Z, A, Ank);
dslvr.SolveDirectTask(Z, A, Ank, mTask[0]);
CheckCublas(cublasSetVector(M, sizeof(float), A.elements, 1, devA, 1));
CheckCublas(cublasSetVector(M, sizeof(float), F.elements, 1, devF, 1));
CheckCublas(cublasSetVector(M, sizeof(float), Z.elements, 1, devZ, 1));
CheckCublas(cublasSetMatrix(M, M, sizeof(float), Ank.elements, M, devAnk, M));
//A = A - F;
cublasSaxpy(M, -1.0f, devF, 1, devA, 1);
CheckCublas(cublasGetError());
//Tmp1 = AT * A
block_size = 32;
grid_size = M / block_size + (M % block_size != 0 ? 1 : 0);
//KernelCalcAZ<<<block_size, grid_size>>> (M, M, devA, devTmp1, devZ, mTask[0].geltaSigm, mTask[0].asimptHeight, gp.dX, gp.dY, gp.NX, gp.NY);
float u = (A + F).Norm() / F.Norm();
cout << "Iteration: " << iteration << " Error = " << u << endl;
if (u < 0.001f || iteration == 7)
{
break;
}
iteration++;
}
printf("Solving finished for %f sec\n", (double)(clock() - t0) / CLOCKS_PER_SEC);
vector<GridData> result;
Z *= -1.0;
for (int i = 0; i < L; i++)
{
GridData layer(gp);
memcpy(layer.data, &Z.elements[i * M], M * sizeof(float));
result.push_back(layer);
}
CheckCublas(cublasFree(devAnk));
CheckCublas(cublasFree(devTmp2));
CheckCublas(cublasFree(devTmp1));
CheckCublas(cublasFree(devZ));
CheckCublas(cublasFree(devA));
CheckCublas(cublasFree(devF));
return result;
}
|
ea2c7474221f3db6f108974899ab9108d422e95d.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2010 ASTRE Henri (http://www.visual-experiments.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cudpp.h>
#include <Feature.h>
//DirectX
#define BINDEX 0
#define GINDEX 1
#define RINDEX 2
#define AINDEX 3
//OpenGL
/*
#define RINDEX 0
#define GINDEX 1
#define BINDEX 2
#define AINDEX 3
*/
void saveToFile(float* buffer, float* buffer2, unsigned int size);
void saveToFile(Feature* buffer, unsigned int size);
void checkCUDAError(const char *msg);
__device__ float getNbMaximum(unsigned char* pixel)
{
unsigned char extremum = pixel[BINDEX];
unsigned char mask = 1;
float nbMaximumFound = 0;
for (unsigned int s=0; s<4; ++s)
{
if (extremum & mask)
nbMaximumFound++;
mask = mask<<1;
}
return nbMaximumFound;
}
__device__ float extractFeatureLocation(unsigned char* pixel, Feature* features, unsigned int start_index, float xpos, float ypos, int octave)
{
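// Bit s of the blue channel flags a maximum found at scale s; the matching bits of the
// red and green channels carry the extra x/y offset added after doubling xpos/ypos.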
unsigned char extremum = pixel[BINDEX];
unsigned char mask = 1;
unsigned int nbMaximumFound = 0;
for (unsigned int s=0; s<4; ++s)
{
if (extremum & mask)
{
float x = xpos*2 + ((pixel[RINDEX] & mask) != 0);
float y = ypos*2 + ((pixel[GINDEX] & mask) != 0);
Feature feat(x, y, s, octave);
features[start_index + nbMaximumFound] = feat;
nbMaximumFound++;
}
mask = mask<<1;
}
return nbMaximumFound;
}
__global__ void extractFeatureLocationPass1(float* out_counter, unsigned char* in_texture, size_t width, size_t height)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < width)
{
float nbMaximumFound = 0;
for (unsigned int i=0; i<height; ++i)
{
unsigned char* pixel = (unsigned char*)(in_texture + i*width*4 + 4*x);
nbMaximumFound += getNbMaximum(pixel);
}
out_counter[x] = nbMaximumFound;
}
}
__global__ void extractFeatureLocationPass2(Feature* out_features, unsigned char* in_texture, float* in_index_start, int index_start, int octave, size_t width, size_t height)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < width)
{
float nbMaximumFound = in_index_start[x] + index_start;
for (unsigned int i=0; i<height; ++i)
{
unsigned char* pixel = (unsigned char*)(in_texture + i*width*4 + 4*x);
nbMaximumFound += extractFeatureLocation(pixel, out_features, nbMaximumFound, x, i, octave);
}
}
}
__global__ void copyCuda2Text1D(float* out_texture, Feature* in_features, int nbFeatureFound, int width)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < nbFeatureFound)
{
float* pixel = (float*) (out_texture + x*4);
Feature* feature = &in_features[x];
pixel[BINDEX] = feature->x;
pixel[GINDEX] = feature->y;
pixel[RINDEX] = feature->scale;
pixel[AINDEX] = feature->octave;
}
else if (x < width)
{
float* pixel = (float*) (out_texture + x*4);
pixel[BINDEX] = 0;
pixel[GINDEX] = 0;
pixel[RINDEX] = 0;
pixel[AINDEX] = 0;
}
}
__global__ void copyText1D2Cuda(Feature* out_features, float* in_texture, int nbFeatureFound, int width)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < nbFeatureFound)
{
float* pixel = (float*) (in_texture + x*4);
Feature* feature = &out_features[x];
/*
feature->x += pixel[BINDEX];
feature->y += pixel[GINDEX];
*/
feature->x = pixel[BINDEX];
feature->y = pixel[GINDEX];
feature->scale = pixel[RINDEX];
feature->octave = pixel[AINDEX];
}
else if (x < width)
{
Feature* feature = &out_features[x];
feature->scale = 0;
feature->y = 0;
feature->x = 0;
feature->octave = 0;
}
}
extern "C" void copyCuda2Tex1D(int width, int height, void* deviceTexture, Feature* deviceFeatures, unsigned int nbFeatureFound)
{
dim3 block(16, 1, 1);
dim3 grid(width / block.x, 1, 1);
hipLaunchKernelGGL(( copyCuda2Text1D), dim3(grid), dim3(block), 0, 0, (float*) deviceTexture, deviceFeatures, nbFeatureFound, width);
}
extern "C" void copyTex1D2Cuda(Feature* deviceFeatures, int width, int height, void* deviceTexture, unsigned int nbFeatureFound)
{
dim3 block(16, 1, 1);
dim3 grid(width / block.x, 1, 1);
hipLaunchKernelGGL(( copyText1D2Cuda), dim3(grid), dim3(block), 0, 0, deviceFeatures, (float*) deviceTexture, nbFeatureFound, width);
}
extern "C" int extractFeatureLocationCuda(size_t width, size_t height, void* deviceTexture,
CUDPPHandle& scanPlan,
int octave,
float* devicePass1,
float* devicePass2,
#ifdef GPUSURF_HOST_DEBUG
float* hostPass1,
float* hostPass2,
#endif
Feature* deviceFeatures,
int featureStartIndex)
{
dim3 block(16, 1, 1);
dim3 grid(width / block.x, 1, 1);
//printf("[%d] %dx%d -> %d\n", octave, width, height, grid.x);
hipLaunchKernelGGL(( extractFeatureLocationPass1), dim3(grid), dim3(block), 0, 0, devicePass1, (unsigned char*) deviceTexture, width, height);
cudppScan(scanPlan, devicePass2, devicePass1, width);
hipLaunchKernelGGL(( extractFeatureLocationPass2), dim3(grid), dim3(block), 0, 0, deviceFeatures, (unsigned char*) deviceTexture, devicePass2, featureStartIndex, octave, width, height);
float nbFeature = 0;
hipMemcpy(&nbFeature, devicePass2+(width-1), sizeof(float), hipMemcpyDeviceToHost);
#ifdef GPUSURF_HOST_DEBUG
memset(hostPass1, 0, width*sizeof(float));
memset(hostPass2, 0, width*sizeof(float));
hipMemcpy(hostPass1, devicePass1, width*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(hostPass2, devicePass2, width*sizeof(float), hipMemcpyDeviceToHost);
//printf("[%d] nb feature found = %4f = %4f\n", octave, nbFeature, hostPass2[width-1]);
saveToFile(hostPass1, hostPass2, width);
#endif
return (int) nbFeature;
}
void saveToFile(float* buffer, float* buffer2, unsigned int size)
{
FILE* fp = fopen("test.txt", "w");
for (unsigned int i=0; i<size; ++i)
{
fprintf(fp, "[%d] -> %8.3f\t %8.3f\n", i, buffer[i], buffer2[i]);
}
fclose(fp);
}
void saveToFile(Feature* buffer, unsigned int size)
{
FILE* fp = fopen("test_feature.txt", "w");
for (unsigned int i=0; i<size; ++i)
{
fprintf(fp, "[%d] -> %8.3f\t %8.3f\n", i, buffer[i].x, buffer[i].y);
}
fclose(fp);
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if(hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
} | ea2c7474221f3db6f108974899ab9108d422e95d.cu | /*
Copyright (c) 2010 ASTRE Henri (http://www.visual-experiments.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cudpp.h>
#include <Feature.h>
//DirectX
#define BINDEX 0
#define GINDEX 1
#define RINDEX 2
#define AINDEX 3
//OpenGL
/*
#define RINDEX 0
#define GINDEX 1
#define BINDEX 2
#define AINDEX 3
*/
void saveToFile(float* buffer, float* buffer2, unsigned int size);
void saveToFile(Feature* buffer, unsigned int size);
void checkCUDAError(const char *msg);
__device__ float getNbMaximum(unsigned char* pixel)
{
unsigned char extremum = pixel[BINDEX];
unsigned char mask = 1;
float nbMaximumFound = 0;
for (unsigned int s=0; s<4; ++s)
{
if (extremum & mask)
nbMaximumFound++;
mask = mask<<1;
}
return nbMaximumFound;
}
__device__ float extractFeatureLocation(unsigned char* pixel, Feature* features, unsigned int start_index, float xpos, float ypos, int octave)
{
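// Bit s of the blue channel flags a maximum found at scale s; the matching bits of the
// red and green channels carry the extra x/y offset added after doubling xpos/ypos.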
unsigned char extremum = pixel[BINDEX];
unsigned char mask = 1;
unsigned int nbMaximumFound = 0;
for (unsigned int s=0; s<4; ++s)
{
if (extremum & mask)
{
float x = xpos*2 + ((pixel[RINDEX] & mask) != 0);
float y = ypos*2 + ((pixel[GINDEX] & mask) != 0);
Feature feat(x, y, s, octave);
features[start_index + nbMaximumFound] = feat;
nbMaximumFound++;
}
mask = mask<<1;
}
return nbMaximumFound;
}
__global__ void extractFeatureLocationPass1(float* out_counter, unsigned char* in_texture, size_t width, size_t height)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < width)
{
float nbMaximumFound = 0;
for (unsigned int i=0; i<height; ++i)
{
unsigned char* pixel = (unsigned char*)(in_texture + i*width*4 + 4*x);
nbMaximumFound += getNbMaximum(pixel);
}
out_counter[x] = nbMaximumFound;
}
}
__global__ void extractFeatureLocationPass2(Feature* out_features, unsigned char* in_texture, float* in_index_start, int index_start, int octave, size_t width, size_t height)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < width)
{
float nbMaximumFound = in_index_start[x] + index_start;
for (unsigned int i=0; i<height; ++i)
{
unsigned char* pixel = (unsigned char*)(in_texture + i*width*4 + 4*x);
nbMaximumFound += extractFeatureLocation(pixel, out_features, nbMaximumFound, x, i, octave);
}
}
}
__global__ void copyCuda2Text1D(float* out_texture, Feature* in_features, int nbFeatureFound, int width)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < nbFeatureFound)
{
float* pixel = (float*) (out_texture + x*4);
Feature* feature = &in_features[x];
pixel[BINDEX] = feature->x;
pixel[GINDEX] = feature->y;
pixel[RINDEX] = feature->scale;
pixel[AINDEX] = feature->octave;
}
else if (x < width)
{
float* pixel = (float*) (out_texture + x*4);
pixel[BINDEX] = 0;
pixel[GINDEX] = 0;
pixel[RINDEX] = 0;
pixel[AINDEX] = 0;
}
}
__global__ void copyText1D2Cuda(Feature* out_features, float* in_texture, int nbFeatureFound, int width)
{
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
if (x < nbFeatureFound)
{
float* pixel = (float*) (in_texture + x*4);
Feature* feature = &out_features[x];
/*
feature->x += pixel[BINDEX];
feature->y += pixel[GINDEX];
*/
feature->x = pixel[BINDEX];
feature->y = pixel[GINDEX];
feature->scale = pixel[RINDEX];
feature->octave = pixel[AINDEX];
}
else if (x < width)
{
Feature* feature = &out_features[x];
feature->scale = 0;
feature->y = 0;
feature->x = 0;
feature->octave = 0;
}
}
extern "C" void copyCuda2Tex1D(int width, int height, void* deviceTexture, Feature* deviceFeatures, unsigned int nbFeatureFound)
{
dim3 block(16, 1, 1);
dim3 grid(width / block.x, 1, 1);
copyCuda2Text1D<<<grid, block, 0>>>((float*) deviceTexture, deviceFeatures, nbFeatureFound, width);
}
extern "C" void copyTex1D2Cuda(Feature* deviceFeatures, int width, int height, void* deviceTexture, unsigned int nbFeatureFound)
{
dim3 block(16, 1, 1);
dim3 grid(width / block.x, 1, 1);
copyText1D2Cuda<<<grid, block, 0>>>(deviceFeatures, (float*) deviceTexture, nbFeatureFound, width);
}
extern "C" int extractFeatureLocationCuda(size_t width, size_t height, void* deviceTexture,
CUDPPHandle& scanPlan,
int octave,
float* devicePass1,
float* devicePass2,
#ifdef GPUSURF_HOST_DEBUG
float* hostPass1,
float* hostPass2,
#endif
Feature* deviceFeatures,
int featureStartIndex)
{
dim3 block(16, 1, 1);
dim3 grid(width / block.x, 1, 1);
//printf("[%d] %dx%d -> %d\n", octave, width, height, grid.x);
extractFeatureLocationPass1<<<grid, block, 0>>>(devicePass1, (unsigned char*) deviceTexture, width, height);
cudppScan(scanPlan, devicePass2, devicePass1, width);
extractFeatureLocationPass2<<<grid, block, 0>>>(deviceFeatures, (unsigned char*) deviceTexture, devicePass2, featureStartIndex, octave, width, height);
float nbFeature = 0;
cudaMemcpy(&nbFeature, devicePass2+(width-1), sizeof(float), cudaMemcpyDeviceToHost);
#ifdef GPUSURF_HOST_DEBUG
memset(hostPass1, 0, width*sizeof(float));
memset(hostPass2, 0, width*sizeof(float));
cudaMemcpy(hostPass1, devicePass1, width*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(hostPass2, devicePass2, width*sizeof(float), cudaMemcpyDeviceToHost);
//printf("[%d] nb feature found = %4f = %4f\n", octave, nbFeature, hostPass2[width-1]);
saveToFile(hostPass1, hostPass2, width);
#endif
return (int) nbFeature;
}
void saveToFile(float* buffer, float* buffer2, unsigned int size)
{
FILE* fp = fopen("test.txt", "w");
for (unsigned int i=0; i<size; ++i)
{
fprintf(fp, "[%d] -> %8.3f\t %8.3f\n", i, buffer[i], buffer2[i]);
}
fclose(fp);
}
void saveToFile(Feature* buffer, unsigned int size)
{
FILE* fp = fopen("test_feature.txt", "w");
for (unsigned int i=0; i<size; ++i)
{
fprintf(fp, "[%d] -> %8.3f\t %8.3f\n", i, buffer[i].x, buffer[i].y);
}
fclose(fp);
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
} |
4e7071a99527f798b3fbfe4ce98666cfd929f6d5.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
#include "matx/transforms/transpose.h"
using namespace matx;
constexpr int m = 15;
template <typename T> class DetSolverTest : public ::testing::Test {
protected:
void SetUp() override
{
pb = std::make_unique<detail::MatXPybind>();
pb->InitAndRunTVGenerator<T>("00_solver", "det", "run", {m});
pb->NumpyToTensorView(Av, "A");
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 2> Av{{m, m}};
tensor_t<T, 2> Atv{{m, m}};
tensor_t<T, 0> detv{{}};
};
template <typename TensorType>
class DetSolverTestNonComplexFloatTypes : public DetSolverTest<TensorType> {
};
TYPED_TEST_SUITE(DetSolverTestNonComplexFloatTypes,
MatXFloatNonComplexNonHalfTypes);
TYPED_TEST(DetSolverTestNonComplexFloatTypes, Determinant)
{
MATX_ENTER_HANDLER();
// cuSolver only supports col-major solving today, so we need to transpose,
// solve, then transpose again to compare to Python
(this->Atv = transpose(this->Av)).run();
(this->detv = det(this->Atv)).run();
(this->Av = transpose(this->Atv)).run(); // Transpose back to row-major
hipStreamSynchronize(0);
MATX_TEST_ASSERT_COMPARE(this->pb, this->detv, "det", 0.1);
MATX_EXIT_HANDLER();
}
| 4e7071a99527f798b3fbfe4ce98666cfd929f6d5.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
#include "matx/transforms/transpose.h"
using namespace matx;
constexpr int m = 15;
template <typename T> class DetSolverTest : public ::testing::Test {
protected:
void SetUp() override
{
pb = std::make_unique<detail::MatXPybind>();
pb->InitAndRunTVGenerator<T>("00_solver", "det", "run", {m});
pb->NumpyToTensorView(Av, "A");
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 2> Av{{m, m}};
tensor_t<T, 2> Atv{{m, m}};
tensor_t<T, 0> detv{{}};
};
template <typename TensorType>
class DetSolverTestNonComplexFloatTypes : public DetSolverTest<TensorType> {
};
TYPED_TEST_SUITE(DetSolverTestNonComplexFloatTypes,
MatXFloatNonComplexNonHalfTypes);
TYPED_TEST(DetSolverTestNonComplexFloatTypes, Determinant)
{
MATX_ENTER_HANDLER();
// cuSolver only supports col-major solving today, so we need to transpose,
// solve, then transpose again to compare to Python
(this->Atv = transpose(this->Av)).run();
(this->detv = det(this->Atv)).run();
(this->Av = transpose(this->Atv)).run(); // Transpose back to row-major
cudaStreamSynchronize(0);
MATX_TEST_ASSERT_COMPARE(this->pb, this->detv, "det", 0.1);
MATX_EXIT_HANDLER();
}
|
9fe1d0c046c920d2ffedc30290992423e14763f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/primitive/include/add.h"
#include "oneflow/core/primitive/cuda/type_seq.h"
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/core/stream/cuda_stream_context.h"
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace primitive {
namespace {
template<typename... Args>
struct AddFunctor;
template<typename T>
struct AddFunctor<T> {
__device__ T operator()(T x) const { return x; }
};
template<typename T, typename U, typename... Args>
struct AddFunctor<T, U, Args...> {
__device__ T operator()(T x0, U x1, Args... xs) const {
return x0 + AddFunctor<U, Args...>()(x1, xs...);
}
};
template<typename T, typename... Args>
__global__ void AddGpu(const Args*... srcs, T* dst, size_t count) {
CUDA_1D_KERNEL_LOOP_T(size_t, i, count) { dst[i] = AddFunctor<Args...>()(srcs[i]...); }
}
template<typename T, typename... Args>
void LaunchAddGpu(hipStream_t stream, const Args*... srcs, T* dst, size_t count) {
hipLaunchKernelGGL(( AddGpu<T, Args...>)
, dim3(BlocksNum4ThreadsNum(count)), dim3(kCudaThreadsNumPerBlock), 0, stream, srcs..., dst, count);
}
template<typename T>
void DispatchLaunch(hipStream_t stream, const T* const* srcs, size_t arity, T* dst, size_t count) {
if (arity == 0) {
OF_CUDA_CHECK(hipMemsetAsync(dst, 0, count * sizeof(T), stream));
} else if (arity == 1) {
OF_CUDA_CHECK(hipMemcpyAsync(dst, srcs[0], count * sizeof(T), hipMemcpyDefault, stream));
} else if (arity == 2) {
OF_CUDA_CHECK((cuda::elementwise::Binary<AddFunctor<T, T>, T, T, T>(
AddFunctor<T, T>(), count, dst, srcs[0], srcs[1], stream)));
} else if (arity == 3) {
OF_CUDA_CHECK((cuda::elementwise::Ternary<AddFunctor<T, T, T>, T, T, T, T>(
AddFunctor<T, T, T>(), count, dst, srcs[0], srcs[1], srcs[2], stream)));
} else if (arity == 4) {
LaunchAddGpu<T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], dst, count);
} else if (arity == 5) {
LaunchAddGpu<T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], dst, count);
} else if (arity == 6) {
LaunchAddGpu<T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5],
dst, count);
} else if (arity == 7) {
LaunchAddGpu<T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, count);
} else if (arity == 8) {
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], srcs[7], dst, count);
} else {
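// More than 8 inputs: first reduce srcs[7..arity-1] into dst recursively,
// then add the remaining 7 sources together with dst.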
DispatchLaunch(stream, srcs + 7, arity - 7, dst, count);
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, dst, count);
}
}
template<typename T>
class AddImpl : public Add {
public:
OF_DISALLOW_COPY_AND_MOVE(AddImpl);
AddImpl() = default;
~AddImpl() override = default;
using Add::Launch;
void Launch(StreamContext* stream_ctx, const void* const* srcs, size_t arity, void* dst,
size_t count) override {
hipStream_t cuda_stream =
CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream();
DispatchLaunch(cuda_stream, reinterpret_cast<const T* const*>(srcs), arity,
reinterpret_cast<T*>(dst), count);
}
};
template<typename T>
std::unique_ptr<Add> NewAdd() {
return std::unique_ptr<Add>(new AddImpl<T>());
}
class AddFactoryImpl : public AddFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(AddFactoryImpl);
AddFactoryImpl() = default;
~AddFactoryImpl() override = default;
std::unique_ptr<Add> New(DataType data_type) override {
#define MAKE_NEW_ADD_ENTRY(type_cpp, type_proto) {type_proto, NewAdd<type_cpp>},
static const std::map<DataType, std::function<std::unique_ptr<Add>()>> new_add_handle{
OF_PP_FOR_EACH_TUPLE(MAKE_NEW_ADD_ENTRY, CUDA_PRIMITIVE_ALL_TYPE_SEQ)};
#undef MAKE_NEW_ADD_ENTRY
const auto it = new_add_handle.find(data_type);
if (it != new_add_handle.end()) {
return it->second();
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, AddFactory, AddFactoryImpl);
} // namespace
} // namespace primitive
} // namespace oneflow
| 9fe1d0c046c920d2ffedc30290992423e14763f6.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/primitive/include/add.h"
#include "oneflow/core/primitive/cuda/type_seq.h"
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/core/stream/cuda_stream_context.h"
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace primitive {
namespace {
template<typename... Args>
struct AddFunctor;
template<typename T>
struct AddFunctor<T> {
__device__ T operator()(T x) const { return x; }
};
template<typename T, typename U, typename... Args>
struct AddFunctor<T, U, Args...> {
__device__ T operator()(T x0, U x1, Args... xs) const {
return x0 + AddFunctor<U, Args...>()(x1, xs...);
}
};
template<typename T, typename... Args>
__global__ void AddGpu(const Args*... srcs, T* dst, size_t count) {
CUDA_1D_KERNEL_LOOP_T(size_t, i, count) { dst[i] = AddFunctor<Args...>()(srcs[i]...); }
}
template<typename T, typename... Args>
void LaunchAddGpu(cudaStream_t stream, const Args*... srcs, T* dst, size_t count) {
AddGpu<T, Args...>
<<<BlocksNum4ThreadsNum(count), kCudaThreadsNumPerBlock, 0, stream>>>(srcs..., dst, count);
}
template<typename T>
void DispatchLaunch(cudaStream_t stream, const T* const* srcs, size_t arity, T* dst, size_t count) {
if (arity == 0) {
OF_CUDA_CHECK(cudaMemsetAsync(dst, 0, count * sizeof(T), stream));
} else if (arity == 1) {
OF_CUDA_CHECK(cudaMemcpyAsync(dst, srcs[0], count * sizeof(T), cudaMemcpyDefault, stream));
} else if (arity == 2) {
OF_CUDA_CHECK((cuda::elementwise::Binary<AddFunctor<T, T>, T, T, T>(
AddFunctor<T, T>(), count, dst, srcs[0], srcs[1], stream)));
} else if (arity == 3) {
OF_CUDA_CHECK((cuda::elementwise::Ternary<AddFunctor<T, T, T>, T, T, T, T>(
AddFunctor<T, T, T>(), count, dst, srcs[0], srcs[1], srcs[2], stream)));
} else if (arity == 4) {
LaunchAddGpu<T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], dst, count);
} else if (arity == 5) {
LaunchAddGpu<T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], dst, count);
} else if (arity == 6) {
LaunchAddGpu<T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5],
dst, count);
} else if (arity == 7) {
LaunchAddGpu<T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, count);
} else if (arity == 8) {
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], srcs[7], dst, count);
} else {
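// More than 8 inputs: first reduce srcs[7..arity-1] into dst recursively,
// then add the remaining 7 sources together with dst.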
DispatchLaunch(stream, srcs + 7, arity - 7, dst, count);
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, dst, count);
}
}
template<typename T>
class AddImpl : public Add {
public:
OF_DISALLOW_COPY_AND_MOVE(AddImpl);
AddImpl() = default;
~AddImpl() override = default;
using Add::Launch;
void Launch(StreamContext* stream_ctx, const void* const* srcs, size_t arity, void* dst,
size_t count) override {
cudaStream_t cuda_stream =
CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream();
DispatchLaunch(cuda_stream, reinterpret_cast<const T* const*>(srcs), arity,
reinterpret_cast<T*>(dst), count);
}
};
template<typename T>
std::unique_ptr<Add> NewAdd() {
return std::unique_ptr<Add>(new AddImpl<T>());
}
class AddFactoryImpl : public AddFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(AddFactoryImpl);
AddFactoryImpl() = default;
~AddFactoryImpl() override = default;
std::unique_ptr<Add> New(DataType data_type) override {
#define MAKE_NEW_ADD_ENTRY(type_cpp, type_proto) {type_proto, NewAdd<type_cpp>},
static const std::map<DataType, std::function<std::unique_ptr<Add>()>> new_add_handle{
OF_PP_FOR_EACH_TUPLE(MAKE_NEW_ADD_ENTRY, CUDA_PRIMITIVE_ALL_TYPE_SEQ)};
#undef MAKE_NEW_ADD_ENTRY
const auto it = new_add_handle.find(data_type);
if (it != new_add_handle.end()) {
return it->second();
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, AddFactory, AddFactoryImpl);
} // namespace
} // namespace primitive
} // namespace oneflow
|
17551a1f8140af0fe03511de66b61e0cff36c3c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include "adaptive_sigmoid.h"
#define CUDA_KERNEL_LOOP(i ,n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<(n); i+= blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N){
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
__global__ void adaptive_sigmoid_fucntion_kernel(
int n,
const float* data_in,
const float* params,
float* output
){
CUDA_KERNEL_LOOP(index, n){
float alpha = params[0];
float beta = params[1];
float gamma = params[2];
float theta = params[3];
float value = data_in[index];
// output[index] = gamma * (1 / (1 + exp(-alpha * (value - beta)))) + theta;
output[index] = gamma * (1 / (1 + exp(-alpha * (value - beta))) - theta);
}
}
__global__ void adaptive_sigmoid_input_grad_kernel(
int n,
const float* data_in,
const float* grad_output,
const float* params,
float* grad_input
){
CUDA_KERNEL_LOOP(index, n){
float alpha = params[0];
float beta = params[1];
float gamma = params[2];
float value = data_in[index];
float d_grad_output = grad_output[index];
float efx = exp(- alpha * (value - beta));
float patial = efx / ((1 + efx) * (1 + efx));
grad_input[index] = gamma * alpha * patial * d_grad_output;
}
}
__global__ void adaptive_sigmoid_params_grad_kernel(
int n,
const float* data_in,
const float* grad_output,
const float* params,
float* grad_params,
bool alpha_update,
bool beta_update,
bool gamma_update,
bool theta_update
){
CUDA_KERNEL_LOOP(index, n){
float alpha = params[0];
float beta = params[1];
float gamma = params[2];
float value = data_in[index];
float d_grad_output = grad_output[index];
float efx = exp(- alpha * (value - beta));
float patial = efx / ((1 + efx) * (1 + efx));
float d_alpha = gamma * patial * (value - beta);
float d_beta = gamma * patial * (- alpha);
float d_gamma = 1 / (1 + efx);
float d_theta = -gamma;
// float d_beta = 0;
// float d_gamma = 0;
// float d_theta = 0;
if (alpha_update)
atomicAdd(grad_params + 0, d_alpha * d_grad_output);
if (beta_update)
atomicAdd(grad_params + 1, d_beta * d_grad_output);
if (gamma_update)
atomicAdd(grad_params + 2, d_gamma * d_grad_output);
if (theta_update)
atomicAdd(grad_params + 3, d_theta * d_grad_output);
}
}
void adaptive_sigmoid_fucntion(
hipStream_t stream,
const float* data_in,
const float* params,
float* output,
int channels, int height, int width
){
int num_kernels = channels * height * width;
hipLaunchKernelGGL(( adaptive_sigmoid_fucntion_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels,
data_in,
params,
output
);
}
void adaptive_sigmoid_input_grad(
hipStream_t stream,
const float* data_in,
const float* grad_outputs,
const float* params,
float* grad_input,
int channels, int height, int width
){
int num_kernels = channels * height * width;
hipLaunchKernelGGL(( adaptive_sigmoid_input_grad_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels,
data_in,
grad_outputs,
params,
grad_input
);
}
void adaptive_sigmoid_params_grad(
hipStream_t stream,
const float* data_in,
const float* grad_outputs,
const float* params,
float* grad_params,
int channels, int height, int width,
bool alpha_update,
bool beta_update,
bool gamma_update,
bool theta_update
){
int num_kernels = channels * height * width;
hipLaunchKernelGGL(( adaptive_sigmoid_params_grad_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels,
data_in,
grad_outputs,
params,
grad_params,
alpha_update,
beta_update,
gamma_update,
theta_update
);
} | 17551a1f8140af0fe03511de66b61e0cff36c3c7.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include "adaptive_sigmoid.h"
#define CUDA_KERNEL_LOOP(i ,n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<(n); i+= blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N){
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
__global__ void adaptive_sigmoid_fucntion_kernel(
int n,
const float* data_in,
const float* params,
float* output
){
CUDA_KERNEL_LOOP(index, n){
float alpha = params[0];
float beta = params[1];
float gamma = params[2];
float theta = params[3];
float value = data_in[index];
// output[index] = gamma * (1 / (1 + exp(-alpha * (value - beta)))) + theta;
output[index] = gamma * (1 / (1 + exp(-alpha * (value - beta))) - theta);
}
}
__global__ void adaptive_sigmoid_input_grad_kernel(
int n,
const float* data_in,
const float* grad_output,
const float* params,
float* grad_input
){
CUDA_KERNEL_LOOP(index, n){
float alpha = params[0];
float beta = params[1];
float gamma = params[2];
float value = data_in[index];
float d_grad_output = grad_output[index];
float efx = exp(- alpha * (value - beta));
float patial = efx / ((1 + efx) * (1 + efx));
grad_input[index] = gamma * alpha * patial * d_grad_output;
}
}
__global__ void adaptive_sigmoid_params_grad_kernel(
int n,
const float* data_in,
const float* grad_output,
const float* params,
float* grad_params,
bool alpha_update,
bool beta_update,
bool gamma_update,
bool theta_update
){
CUDA_KERNEL_LOOP(index, n){
float alpha = params[0];
float beta = params[1];
float gamma = params[2];
float value = data_in[index];
float d_grad_output = grad_output[index];
float efx = exp(- alpha * (value - beta));
float patial = efx / ((1 + efx) * (1 + efx));
float d_alpha = gamma * patial * (value - beta);
float d_beta = gamma * patial * (- alpha);
float d_gamma = 1 / (1 + efx);
float d_theta = -gamma;
// float d_beta = 0;
// float d_gamma = 0;
// float d_theta = 0;
if (alpha_update)
atomicAdd(grad_params + 0, d_alpha * d_grad_output);
if (beta_update)
atomicAdd(grad_params + 1, d_beta * d_grad_output);
if (gamma_update)
atomicAdd(grad_params + 2, d_gamma * d_grad_output);
if (theta_update)
atomicAdd(grad_params + 3, d_theta * d_grad_output);
}
}
void adaptive_sigmoid_fucntion(
cudaStream_t stream,
const float* data_in,
const float* params,
float* output,
int channels, int height, int width
){
int num_kernels = channels * height * width;
adaptive_sigmoid_fucntion_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels,
data_in,
params,
output
);
}
void adaptive_sigmoid_input_grad(
cudaStream_t stream,
const float* data_in,
const float* grad_outputs,
const float* params,
float* grad_input,
int channels, int height, int width
){
int num_kernels = channels * height * width;
adaptive_sigmoid_input_grad_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels,
data_in,
grad_outputs,
params,
grad_input
);
}
void adaptive_sigmoid_params_grad(
cudaStream_t stream,
const float* data_in,
const float* grad_outputs,
const float* params,
float* grad_params,
int channels, int height, int width,
bool alpha_update,
bool beta_update,
bool gamma_update,
bool theta_update
){
int num_kernels = channels * height * width;
adaptive_sigmoid_params_grad_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels,
data_in,
grad_outputs,
params,
grad_params,
alpha_update,
beta_update,
gamma_update,
theta_update
);
} |
transpose_gpu_impl_test.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/transpose/transpose_gpu_impl.cuh" // NOLINT
#include "dali/kernels/transpose/transpose_gpu_setup.cuh" // NOLINT
#include <gtest/gtest.h>
#include <algorithm>
#include <numeric>
#include <vector>
#include "dali/core/dev_buffer.h"
#include "dali/kernels/common/utils.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/test/test_tensors.h"
#include "dali/core/cuda_event.h"
#include "dali/kernels/transpose/transpose_test.h"
namespace dali {
namespace kernels {
using namespace transpose_impl; // NOLINT
TEST(SimplifyPermute, NoSimplification) {
int64_t shape[] = { 2, 3, 4, 5 };
int perm[] = { 0, 3, 2, 1 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 4);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, CollapseUnitDims) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 0, 5, 1, 3, 2, 4 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, Collapse) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 3, 4, 5, 0, 1, 2 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 6, 20 };
ref_perm = { 1, 0 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(TransposeGPU, GetTransposeMethod) {
{
TensorShape<> shape = { 640*480, 3 };
int perm[] = { 1, 0 };
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Deinterleave);
}
{
TensorShape<> shape = { 3, 640*480 };
int perm[] = { 1, 0 }; // interleave
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Interleave);
}
{
TensorShape<> shape = { 640, 480 };
int perm[] = { 1, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 20, 640, 480 };
int perm[] = { 1, 2, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 480, 3 };
int perm[] = { 1, 0, 2 }; // vectorized tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 3, 480 };
int perm[] = { 1, 2, 0 }; // some mess
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640, 480, 50 };
int perm[] = { 1, 0, 2 }; // generic stuff
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640*480 };
int perm[] = { 0 }; // identity
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 1, sizeof(int)),
TransposeMethod::Copy);
}
}
TEST(TransposeTiled, AllPerm4DInnermost) {
TensorShape<> shape = { 19, 57, 37, 53 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
int grid_size = ::max(1, size / 512);
ASSERT_LT(grid_size * 512, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
hipMemset(out_gpu, 0xff, size*sizeof(int));
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
hipEventRecord(start);
hipLaunchKernelGGL(( TransposeTiledSingle), dim3(grid_size), dim3(dim3(32, 16)), kTiledTransposeMaxSharedMem, 0, desc);
hipEventRecord(end);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
hipEventElapsedTime(&time, start, end);
time *= 1e+6;
std::cerr << 2*size*sizeof(int) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeTiled, BuildDescVectorized) {
TensorShape<> shape = { 57, 37, 53, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
hipMemset(out_gpu, 0xff, size*sizeof(int));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
hipLaunchKernelGGL(( TransposeTiledSingle), dim3(grid_size), dim3(dim3(32, 16)), kTiledTransposeMaxSharedMem, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
TEST(TransposeDeinterleave, AllPerm4DInnermost) {
int channels = 3;
TensorShape<> shape = { 19, 157, 137, channels }; // small inner dimension
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int block_size = 256;
int grid_size = ::max(1, size / (block_size * channels));
ASSERT_LT(grid_size * block_size * channels, size)
<< "Weak test error: Grid too large to test grid loop";
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
hipMemset(out_gpu, 0xff, size*sizeof(int));
DeinterleaveDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitDeinterleave(desc, shape, make_span(perm), out_gpu, in_gpu);
hipEventRecord(start);
hipLaunchKernelGGL(( TransposeDeinterleaveSingle), dim3(grid_size), dim3(block_size), 0, 0, desc);
hipEventRecord(end);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
hipEventElapsedTime(&time, start, end);
time *= 1e+6;
std::cerr << 2*size*sizeof(int) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeGeneric, AllPerm4D) {
TensorShape<> shape = { 31, 43, 53, 47 };
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int grid_size = 2048;
int block_size = 256;
ASSERT_LT(grid_size * block_size, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << " input shape "
<< shape << "\n";
hipMemset(out_gpu, 0xff, size*sizeof(int));
GenericTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitGenericTranspose(desc, shape, make_span(perm), out_gpu, in_gpu);
hipLaunchKernelGGL(( TransposeGenericSingle), dim3(grid_size), dim3(block_size), 0, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
TensorShape<> simplified_shape;
SmallVector<int, 6> simplified_perm;
SimplifyPermute(simplified_shape, simplified_perm, shape.data(), perm, 4);
if (simplified_shape == shape) {
for (int i = 0; i < 4; i++) {
ASSERT_EQ(simplified_perm[i], perm[i]);
}
// no simplification, don't repeat the test
continue;
}
std::cerr << "Testing permutation ";
for (auto i : simplified_perm)
std::cerr << i << " ";
std::cerr << " input shape " << simplified_shape << "\n";
memset(&desc, 0xCC, sizeof(desc));
hipMemset(out_gpu, 0xff, size*sizeof(int));
InitGenericTranspose(desc, simplified_shape, make_span(simplified_perm), out_gpu, in_gpu);
hipLaunchKernelGGL(( TransposeGenericSingle), dim3(grid_size), dim3(block_size), 0, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
} // namespace kernels
} // namespace dali
| transpose_gpu_impl_test.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/transpose/transpose_gpu_impl.cuh" // NOLINT
#include "dali/kernels/transpose/transpose_gpu_setup.cuh" // NOLINT
#include <gtest/gtest.h>
#include <algorithm>
#include <numeric>
#include <vector>
#include "dali/core/dev_buffer.h"
#include "dali/kernels/common/utils.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/test/test_tensors.h"
#include "dali/core/cuda_event.h"
#include "dali/kernels/transpose/transpose_test.h"
namespace dali {
namespace kernels {
using namespace transpose_impl; // NOLINT
TEST(SimplifyPermute, NoSimplification) {
int64_t shape[] = { 2, 3, 4, 5 };
int perm[] = { 0, 3, 2, 1 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 4);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, CollapseUnitDims) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 0, 5, 1, 3, 2, 4 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, Collapse) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 3, 4, 5, 0, 1, 2 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 6, 20 };
ref_perm = { 1, 0 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(TransposeGPU, GetTransposeMethod) {
{
TensorShape<> shape = { 640*480, 3 };
int perm[] = { 1, 0 };
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Deinterleave);
}
{
TensorShape<> shape = { 3, 640*480 };
int perm[] = { 1, 0 }; // interleave
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Interleave);
}
{
TensorShape<> shape = { 640, 480 };
int perm[] = { 1, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 20, 640, 480 };
int perm[] = { 1, 2, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 480, 3 };
int perm[] = { 1, 0, 2 }; // vectorized tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 3, 480 };
int perm[] = { 1, 2, 0 }; // some mess
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640, 480, 50 };
int perm[] = { 1, 0, 2 }; // generic stuff
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640*480 };
int perm[] = { 0 }; // identity
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 1, sizeof(int)),
TransposeMethod::Copy);
}
}
TEST(TransposeTiled, AllPerm4DInnermost) {
TensorShape<> shape = { 19, 57, 37, 53 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
int grid_size = std::max(1, size / 512);
ASSERT_LT(grid_size * 512, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
cudaMemset(out_gpu, 0xff, size*sizeof(int));
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
cudaEventRecord(start);
TransposeTiledSingle<<<grid_size, dim3(32, 16), kTiledTransposeMaxSharedMem>>>(desc);
cudaEventRecord(end);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
cudaEventElapsedTime(&time, start, end);
time *= 1e+6;
std::cerr << 2*size*sizeof(int) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeTiled, BuildDescVectorized) {
TensorShape<> shape = { 57, 37, 53, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
cudaMemset(out_gpu, 0xff, size*sizeof(int));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
TransposeTiledSingle<<<grid_size, dim3(32, 16), kTiledTransposeMaxSharedMem>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
TEST(TransposeDeinterleave, AllPerm4DInnermost) {
int channels = 3;
TensorShape<> shape = { 19, 157, 137, channels }; // small inner dimension
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int block_size = 256;
int grid_size = std::max(1, size / (block_size * channels));
ASSERT_LT(grid_size * block_size * channels, size)
<< "Weak test error: Grid too large to test grid loop";
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
cudaMemset(out_gpu, 0xff, size*sizeof(int));
DeinterleaveDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitDeinterleave(desc, shape, make_span(perm), out_gpu, in_gpu);
cudaEventRecord(start);
TransposeDeinterleaveSingle<<<grid_size, block_size>>>(desc);
cudaEventRecord(end);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
cudaEventElapsedTime(&time, start, end);
time *= 1e+6;
std::cerr << 2*size*sizeof(int) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeGeneric, AllPerm4D) {
TensorShape<> shape = { 31, 43, 53, 47 };
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int grid_size = 2048;
int block_size = 256;
ASSERT_LT(grid_size * block_size, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << " input shape "
<< shape << "\n";
cudaMemset(out_gpu, 0xff, size*sizeof(int));
GenericTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitGenericTranspose(desc, shape, make_span(perm), out_gpu, in_gpu);
TransposeGenericSingle<<<grid_size, block_size>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
TensorShape<> simplified_shape;
SmallVector<int, 6> simplified_perm;
SimplifyPermute(simplified_shape, simplified_perm, shape.data(), perm, 4);
if (simplified_shape == shape) {
for (int i = 0; i < 4; i++) {
ASSERT_EQ(simplified_perm[i], perm[i]);
}
// no simplification, don't repeat the test
continue;
}
std::cerr << "Testing permutation ";
for (auto i : simplified_perm)
std::cerr << i << " ";
std::cerr << " input shape " << simplified_shape << "\n";
memset(&desc, 0xCC, sizeof(desc));
cudaMemset(out_gpu, 0xff, size*sizeof(int));
InitGenericTranspose(desc, simplified_shape, make_span(simplified_perm), out_gpu, in_gpu);
TransposeGenericSingle<<<grid_size, block_size>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
} // namespace kernels
} // namespace dali
|
7257475f716108bbc4faac63296c58d31fe81c56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> c d s
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define blockinfo(i,j) blockinfo[(i)*c_blocks + (j)]
#define val(i,j) val+((blockinfo(i,j)-1)*size_b*size_b)
// every thread initializes one entry
__global__ void
zbcsrblockinfo5_kernel(
magma_int_t num_blocks,
magmaDoubleComplex_ptr address,
magmaDoubleComplex_ptr *AII )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num_blocks ){
*AII[ i ] = *address;
if(i==0)
printf("address: %d\n", address);
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine copies the filled blocks
from the original matrix A and initializes the blocks that will later be
filled in the factorization process with zeros.
Arguments
---------
@param[in]
lustep magma_int_t
lustep
@param[in]
num_blocks magma_int_t
number of nonzero blocks
@param[in]
c_blocks magma_int_t
number of column-blocks
@param[in]
size_b magma_int_t
blocksize
@param[in]
blockinfo magma_int_t*
block filled? location?
@param[in]
val magmaDoubleComplex*
pointers to the nonzero blocks in A
@param[in]
AII magmaDoubleComplex**
pointers to the respective nonzero blocks in B
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrblockinfo5(
magma_int_t lustep,
magma_int_t num_blocks,
magma_int_t c_blocks,
magma_int_t size_b,
magma_index_t *blockinfo,
magmaDoubleComplex_ptr val,
magmaDoubleComplex_ptr *AII,
magma_queue_t queue )
{
dim3 dimBlock( BLOCK_SIZE, 1, 1 );
int dimgrid = (num_blocks+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 dimGrid( dimgrid, 1, 1 );
printf("dim grid: %d x %d", dimgrid, BLOCK_SIZE);
magmaDoubleComplex **hAII;
magma_malloc((void **)&hAII, num_blocks*sizeof(magmaDoubleComplex*));
for(int i=0; i<num_blocks; i++) {
hAII[i] = val(lustep,lustep);
}
magma_setvector( num_blocks, sizeof(magmaDoubleComplex*),
hAII, 1, AII, 1 );
/*
magma_setvector( 1, sizeof(magmaDoubleComplex*), address, 1, daddress, 1 );
zbcsrblockinfo5_kernel<<<dimGrid,dimBlock, 0, queue >>>
( num_blocks, daddress, AII );
*/
return MAGMA_SUCCESS;
}
| 7257475f716108bbc4faac63296c58d31fe81c56.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> c d s
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define blockinfo(i,j) blockinfo[(i)*c_blocks + (j)]
#define val(i,j) val+((blockinfo(i,j)-1)*size_b*size_b)
// every thread initializes one entry
__global__ void
zbcsrblockinfo5_kernel(
magma_int_t num_blocks,
magmaDoubleComplex_ptr address,
magmaDoubleComplex_ptr *AII )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num_blocks ){
*AII[ i ] = *address;
if(i==0)
printf("address: %d\n", address);
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine copies the filled blocks
from the original matrix A and initializes the blocks that will later be
filled in the factorization process with zeros.
Arguments
---------
@param[in]
lustep magma_int_t
lustep
@param[in]
num_blocks magma_int_t
number of nonzero blocks
@param[in]
c_blocks magma_int_t
number of column-blocks
@param[in]
size_b magma_int_t
blocksize
@param[in]
blockinfo magma_int_t*
block filled? location?
@param[in]
val magmaDoubleComplex*
pointers to the nonzero blocks in A
@param[in]
AII magmaDoubleComplex**
pointers to the respective nonzero blocks in B
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrblockinfo5(
magma_int_t lustep,
magma_int_t num_blocks,
magma_int_t c_blocks,
magma_int_t size_b,
magma_index_t *blockinfo,
magmaDoubleComplex_ptr val,
magmaDoubleComplex_ptr *AII,
magma_queue_t queue )
{
dim3 dimBlock( BLOCK_SIZE, 1, 1 );
int dimgrid = (num_blocks+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 dimGrid( dimgrid, 1, 1 );
printf("dim grid: %d x %d", dimgrid, BLOCK_SIZE);
magmaDoubleComplex **hAII;
magma_malloc((void **)&hAII, num_blocks*sizeof(magmaDoubleComplex*));
for(int i=0; i<num_blocks; i++) {
hAII[i] = val(lustep,lustep);
}
magma_setvector( num_blocks, sizeof(magmaDoubleComplex*),
hAII, 1, AII, 1 );
/*
magma_setvector( 1, sizeof(magmaDoubleComplex*), address, 1, daddress, 1 );
zbcsrblockinfo5_kernel<<<dimGrid,dimBlock, 0, queue >>>
( num_blocks, daddress, AII );
*/
return MAGMA_SUCCESS;
}
|
001e72569c84ad9e1ed065833466ed394688d5e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
__global__ void kernel(float *a, int offset)
{
int i = offset + threadIdx.x + blockIdx.x*blockDim.x;
float x = (float)i;
float s = sinf(x);
float c = cosf(x);
a[i] = a[i] + sqrtf(s*s+c*c);
}
float maxError(float *a, int n)
{
float maxE = 0;
for (int i = 0; i < n; i++) {
float error = fabs(a[i]-1.0f);
if (error > maxE) maxE = error;
}
return maxE;
}
int main(int argc, char **argv)
{
const int blockSize = 256, nStreams = 4;
const int n = 4 * 1024 * blockSize * nStreams * 100;
const int streamSize = n / nStreams;
const int streamBytes = streamSize * sizeof(float);
const int bytes = n * sizeof(float);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( hipSetDevice(devId) );
// allocate pinned host memory and device memory
float *a, *d_a;
checkCuda( hipHostMalloc((void**)&a, bytes) ); // host pinned
checkCuda( hipMalloc((void**)&d_a, bytes) ); // device
float ms; // elapsed time in milliseconds
// create events and streams
hipEvent_t startEvent, stopEvent, dummyEvent;
hipStream_t stream[nStreams];
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( hipStreamCreate(&stream[i]) );
// baseline case - sequential transfer and execute
memset(a, 0, bytes);
checkCuda( hipEventRecord(startEvent,0) );
checkCuda( hipMemcpy(d_a, a, bytes / nStreams, hipMemcpyHostToDevice) );
//kernel<<<n/blockSize, blockSize>>>(d_a, 0);
checkCuda( hipMemcpy(a, d_a, bytes / nStreams, hipMemcpyDeviceToHost) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for sequential transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 1: loop over {copy, kernel, copy}
memset(a, 0, bytes);
checkCuda( hipEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i) {
int offset = i * streamSize;
checkCuda( hipMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, hipMemcpyHostToDevice,
stream[i]) );
//kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
checkCuda( hipMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, hipMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V1 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 2:
// loop over copy, loop over kernel, loop over copy
memset(a, 0, bytes);
checkCuda( hipEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( hipMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, hipMemcpyHostToDevice,
stream[i]) );
}
/*
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
}
*/
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( hipMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, hipMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V2 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// cleanup
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
checkCuda( hipEventDestroy(dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( hipStreamDestroy(stream[i]) );
hipFree(d_a);
hipHostFree(a);
return 0;
}
| 001e72569c84ad9e1ed065833466ed394688d5e4.cu | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
__global__ void kernel(float *a, int offset)
{
int i = offset + threadIdx.x + blockIdx.x*blockDim.x;
float x = (float)i;
float s = sinf(x);
float c = cosf(x);
a[i] = a[i] + sqrtf(s*s+c*c);
}
float maxError(float *a, int n)
{
float maxE = 0;
for (int i = 0; i < n; i++) {
float error = fabs(a[i]-1.0f);
if (error > maxE) maxE = error;
}
return maxE;
}
int main(int argc, char **argv)
{
const int blockSize = 256, nStreams = 4;
const int n = 4 * 1024 * blockSize * nStreams * 100;
const int streamSize = n / nStreams;
const int streamBytes = streamSize * sizeof(float);
const int bytes = n * sizeof(float);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( cudaSetDevice(devId) );
// allocate pinned host memory and device memory
float *a, *d_a;
checkCuda( cudaMallocHost((void**)&a, bytes) ); // host pinned
checkCuda( cudaMalloc((void**)&d_a, bytes) ); // device
float ms; // elapsed time in milliseconds
// create events and streams
cudaEvent_t startEvent, stopEvent, dummyEvent;
cudaStream_t stream[nStreams];
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( cudaStreamCreate(&stream[i]) );
// baseline case - sequential transfer and execute
memset(a, 0, bytes);
checkCuda( cudaEventRecord(startEvent,0) );
checkCuda( cudaMemcpy(d_a, a, bytes / nStreams, cudaMemcpyHostToDevice) );
//kernel<<<n/blockSize, blockSize>>>(d_a, 0);
checkCuda( cudaMemcpy(a, d_a, bytes / nStreams, cudaMemcpyDeviceToHost) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for sequential transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 1: loop over {copy, kernel, copy}
memset(a, 0, bytes);
checkCuda( cudaEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i) {
int offset = i * streamSize;
checkCuda( cudaMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, cudaMemcpyHostToDevice,
stream[i]) );
//kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
checkCuda( cudaMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, cudaMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V1 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 2:
// loop over copy, loop over kernel, loop over copy
memset(a, 0, bytes);
checkCuda( cudaEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( cudaMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, cudaMemcpyHostToDevice,
stream[i]) );
}
/*
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
}
*/
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( cudaMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, cudaMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V2 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// cleanup
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
checkCuda( cudaEventDestroy(dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( cudaStreamDestroy(stream[i]) );
cudaFree(d_a);
cudaFreeHost(a);
return 0;
}
|
3f9945e223e400e22fad44d2d0e41d3bd9a970b7.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM2DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM;
template hipError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, WCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const Scalar2 *d_vertices,
const unsigned int *d_num_shape_verts, const Scalar* d_diam,
const Scalar4 *d_velocity,
const unsigned int vertexCount, const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const WCADEM potential, const Scalar r_cutsq,
const unsigned int n_shapes,
const unsigned int particlesPerBlock, const unsigned int maxVerts);
| 3f9945e223e400e22fad44d2d0e41d3bd9a970b7.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM2DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM;
template cudaError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, WCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const Scalar2 *d_vertices,
const unsigned int *d_num_shape_verts, const Scalar* d_diam,
const Scalar4 *d_velocity,
const unsigned int vertexCount, const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const WCADEM potential, const Scalar r_cutsq,
const unsigned int n_shapes,
const unsigned int particlesPerBlock, const unsigned int maxVerts);
|
64847930706a113c2e5e9fa1b5668a318fa7b044.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/warp_solver/SparseCorrespondenceHandler.h"
#include "SparseCorrespondenceHandler.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
enum {
window_halfsize = 1,
};
__device__ ushort2 validGeometryPixelInWindow(
hipTextureObject_t index_map,
unsigned short center_x, unsigned short center_y
) {
ushort2 valid_pixel = make_ushort2(0xFFFF, 0xFFFF);
//Perform a window search
for(auto y = center_y - window_halfsize; y <= center_y + window_halfsize; y++) {
for(auto x = center_x - window_halfsize; x <= center_x + window_halfsize; x++) {
if(tex2D<unsigned>(index_map, x, y) != 0xFFFFFFFF) {
valid_pixel.x = x;
valid_pixel.y = y;
break;
}
}
}
//Always prefer the center one
if(tex2D<unsigned>(index_map, center_x, center_y) != 0xFFFFFFFF) {
valid_pixel.x = center_x;
valid_pixel.y = center_y;
}
//Return it
return valid_pixel;
}
__device__ ushort2 validDepthPixelInWindow(
hipTextureObject_t depth_vertex_map,
unsigned short center_x, unsigned short center_y
) {
ushort2 valid_pixel = make_ushort2(0xFFFF, 0xFFFF);
//Perform a window search
for(auto y = center_y - window_halfsize; y <= center_y + window_halfsize; y++) {
for(auto x = center_x - window_halfsize; x <= center_x + window_halfsize; x++) {
const float4 vertex = tex2D<float4>(depth_vertex_map, x, y);
if(!is_zero_vertex(vertex)) {
valid_pixel.x = x;
valid_pixel.y = y;
break;
}
}
}
//Always prefer the center one
const float4 center_vertex = tex2D<float4>(depth_vertex_map, center_x, center_y);
if(!is_zero_vertex(center_vertex)) {
valid_pixel.x = center_x;
valid_pixel.y = center_y;
}
//Return it
return valid_pixel;
}
__global__ void chooseValidPixelKernel(
DeviceArrayView<ushort4> candidate_pixel_pairs,
hipTextureObject_t depth_vertex_map,
hipTextureObject_t index_map,
unsigned rows, unsigned cols,
unsigned* valid_indicator,
ushort4* valid_pixel_pairs
) {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < candidate_pixel_pairs.Size())
{
const auto candidate_pair = candidate_pixel_pairs[idx];
const auto geometry_pixel = validGeometryPixelInWindow(index_map, candidate_pair.x, candidate_pair.y);
const auto depth_pixel = validDepthPixelInWindow(depth_vertex_map, candidate_pair.z, candidate_pair.w);
if(geometry_pixel.x < cols && geometry_pixel.y < rows && depth_pixel.x < cols && depth_pixel.y < rows) {
valid_indicator[idx] = 1;
valid_pixel_pairs[idx] = make_ushort4(geometry_pixel.x, geometry_pixel.y, depth_pixel.x, depth_pixel.y);
}
else {
valid_indicator[idx] = 0;
}
}
}
__global__ void compactQueryValidPairsKernel(
hipTextureObject_t depth_vertex_map,
hipTextureObject_t reference_vertex_map,
const DeviceArrayView2D<KNNAndWeight> knn_map,
const DeviceArrayView<unsigned> valid_indicator,
const unsigned* prefixsum_indicator,
const ushort4* valid_pixel_pairs,
const mat34 camera2world,
float4* target_vertex_array,
float4* reference_vertex_array,
ushort4* knn_array,
float4* knn_weight_array
) {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx >= valid_indicator.Size()) return;
if(valid_indicator[idx] != 0) {
const auto offset = prefixsum_indicator[idx] - 1;
const auto pixel_pair = valid_pixel_pairs[idx];
const float4 reference_vertex = tex2D<float4>(reference_vertex_map, pixel_pair.x, pixel_pair.y);
const float4 depth_vertex = tex2D<float4>(depth_vertex_map, pixel_pair.z, pixel_pair.w);
const auto knn = knn_map(pixel_pair.y, pixel_pair.x); //KNN must be valid
//Compute the target vertex
const float3 depth_v3 = make_float3(depth_vertex.x, depth_vertex.y, depth_vertex.z);
const float3 target_v3 = camera2world.rot * depth_v3 + camera2world.trans;
//Write to output
target_vertex_array[offset] = make_float4(target_v3.x, target_v3.y, target_v3.z, 1.0f);
reference_vertex_array[offset] = reference_vertex;
knn_array[offset] = knn.knn;
knn_weight_array[offset] = knn.weight;
}
}
//Forward warp the vertex for better computation of jacobian
__global__ void forwardWarpFeatureVertexKernel(
DeviceArrayView<float4> reference_vertex_array,
const ushort4* vertex_knn_array,
const float4* vertex_knnweight_array,
const DualQuaternion* node_se3,
float4* warped_vertex_array
) {
const auto idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < reference_vertex_array.Size()) {
const float4 reference_vertex = reference_vertex_array[idx];
const ushort4 knn = vertex_knn_array[idx];
const float4 knnweight = vertex_knnweight_array[idx];
DualQuaternion dq = averageDualQuaternion(node_se3, knn, knnweight);
const mat34 se3 = dq.se3_matrix();
const float3 warped_vertex = se3.rot * reference_vertex + se3.trans;
warped_vertex_array[idx] = make_float4(warped_vertex.x, warped_vertex.y, warped_vertex.z, 1.0f);
}
}
} // namespace device
} // namespace surfelwarp
void surfelwarp::SparseCorrespondenceHandler::ChooseValidPixelPairs(hipStream_t stream) {
m_valid_pixel_indicator.ResizeArrayOrException(m_observations.correspond_pixel_pairs.Size());
m_corrected_pixel_pairs.ResizeArrayOrException(m_observations.correspond_pixel_pairs.Size());
//The correspondence array might be empty
if(m_valid_pixel_indicator.ArraySize() == 0) return;
dim3 blk(64);
dim3 grid(divUp(m_valid_pixel_indicator.ArraySize(), blk.x));
const auto rows = m_geometry_maps.knn_map.Rows();
const auto cols = m_geometry_maps.knn_map.Cols();
hipLaunchKernelGGL(( device::chooseValidPixelKernel), dim3(grid), dim3(blk), 0, stream,
m_observations.correspond_pixel_pairs,
m_observations.depth_vertex_map,
m_geometry_maps.index_map,
rows, cols,
m_valid_pixel_indicator,
m_corrected_pixel_pairs
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
}
void surfelwarp::SparseCorrespondenceHandler::CompactQueryPixelPairs(hipStream_t stream) {
//The correspondence array might be empty
if(m_valid_pixel_indicator.ArraySize() == 0) return;
//Inclusive sum
m_valid_pixel_prefixsum.InclusiveSum(m_valid_pixel_indicator.ArrayView(), stream);
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
//Choose it
dim3 blk(64);
dim3 grid(divUp(m_valid_pixel_indicator.ArraySize(), blk.x));
hipLaunchKernelGGL(( device::compactQueryValidPairsKernel), dim3(grid), dim3(blk), 0, stream,
m_observations.depth_vertex_map,
m_geometry_maps.reference_vertex_map,
m_geometry_maps.knn_map,
//Prefix-sum information
m_valid_pixel_indicator.ArrayView(),
m_valid_pixel_prefixsum.valid_prefixsum_array.ptr(),
m_corrected_pixel_pairs.Ptr(),
m_camera2world,
//The output
m_valid_target_vertex.Ptr(),
m_valid_reference_vertex.Ptr(),
m_valid_vertex_knn.Ptr(),
m_valid_knn_weight.Ptr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
}
void surfelwarp::SparseCorrespondenceHandler::QueryCompactedArraySize(hipStream_t stream) {
//The correspondence array might be empty
if(m_valid_pixel_indicator.ArraySize() == 0) {
m_valid_target_vertex.ResizeArrayOrException(0);
m_valid_reference_vertex.ResizeArrayOrException(0);
m_valid_vertex_knn.ResizeArrayOrException(0);
m_valid_knn_weight.ResizeArrayOrException(0);
return;
}
//Non-empty array
//unsigned valid_array_size;
cudaSafeCall(hipMemcpyAsync(
m_correspondence_array_size,
m_valid_pixel_prefixsum.valid_prefixsum_array.ptr() + m_valid_pixel_prefixsum.valid_prefixsum_array.size() - 1,
sizeof(unsigned),
hipMemcpyDeviceToHost,
stream
));
//Sync before use
cudaSafeCall(hipStreamSynchronize(stream));
//LOG(INFO) << "The number of valid pixel pairs is " << valid_array_size;
//Correct the size
m_valid_target_vertex.ResizeArrayOrException(*m_correspondence_array_size);
m_valid_reference_vertex.ResizeArrayOrException(*m_correspondence_array_size);
m_valid_vertex_knn.ResizeArrayOrException(*m_correspondence_array_size);
m_valid_knn_weight.ResizeArrayOrException(*m_correspondence_array_size);
}
/* The method to build the term 2 jacobian map
*/
void surfelwarp::SparseCorrespondenceHandler::forwardWarpFeatureVertex(hipStream_t stream) {
if(m_valid_reference_vertex.ArraySize() == 0)
return;
//Correct the size
m_valid_warped_vertex.ResizeArrayOrException(m_valid_reference_vertex.ArraySize());
//Do a forward warp
dim3 blk(128);
dim3 grid(divUp(m_valid_reference_vertex.ArraySize(), blk.x));
hipLaunchKernelGGL(( device::forwardWarpFeatureVertexKernel), dim3(grid), dim3(blk), 0, stream,
m_valid_reference_vertex.ArrayView(),
m_valid_vertex_knn.Ptr(), m_valid_knn_weight.Ptr(),
m_node_se3.RawPtr(),
m_valid_warped_vertex.Ptr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
}
void surfelwarp::SparseCorrespondenceHandler::BuildTerm2Jacobian(hipStream_t stream) {
forwardWarpFeatureVertex(stream);
}
surfelwarp::Point2PointICPTerm2Jacobian surfelwarp::SparseCorrespondenceHandler::Term2JacobianMap() const {
Point2PointICPTerm2Jacobian term2jacobian;
term2jacobian.target_vertex = m_valid_target_vertex.ArrayView();
term2jacobian.reference_vertex = m_valid_reference_vertex.ArrayView();
term2jacobian.knn = m_valid_vertex_knn.ArrayView();
term2jacobian.knn_weight = m_valid_knn_weight.ArrayView();
term2jacobian.node_se3 = m_node_se3;
term2jacobian.warped_vertex = m_valid_warped_vertex.ArrayView();
//Check the size
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.reference_vertex.Size());
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.knn.Size());
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.knn_weight.Size());
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.warped_vertex.Size());
//Return it
return term2jacobian;
}
| 64847930706a113c2e5e9fa1b5668a318fa7b044.cu | #include "core/warp_solver/SparseCorrespondenceHandler.h"
#include "SparseCorrespondenceHandler.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
enum {
window_halfsize = 1,
};
__device__ ushort2 validGeometryPixelInWindow(
cudaTextureObject_t index_map,
unsigned short center_x, unsigned short center_y
) {
ushort2 valid_pixel = make_ushort2(0xFFFF, 0xFFFF);
//Perform a window search
for(auto y = center_y - window_halfsize; y <= center_y + window_halfsize; y++) {
for(auto x = center_x - window_halfsize; x <= center_x + window_halfsize; x++) {
if(tex2D<unsigned>(index_map, x, y) != 0xFFFFFFFF) {
valid_pixel.x = x;
valid_pixel.y = y;
break;
}
}
}
//Always prefer the center one
if(tex2D<unsigned>(index_map, center_x, center_y) != 0xFFFFFFFF) {
valid_pixel.x = center_x;
valid_pixel.y = center_y;
}
//Return it
return valid_pixel;
}
__device__ ushort2 validDepthPixelInWindow(
cudaTextureObject_t depth_vertex_map,
unsigned short center_x, unsigned short center_y
) {
ushort2 valid_pixel = make_ushort2(0xFFFF, 0xFFFF);
//Perform a window search
for(auto y = center_y - window_halfsize; y <= center_y + window_halfsize; y++) {
for(auto x = center_x - window_halfsize; x <= center_x + window_halfsize; x++) {
const float4 vertex = tex2D<float4>(depth_vertex_map, x, y);
if(!is_zero_vertex(vertex)) {
valid_pixel.x = x;
valid_pixel.y = y;
break;
}
}
}
//Always prefer the center one
const float4 center_vertex = tex2D<float4>(depth_vertex_map, center_x, center_y);
if(!is_zero_vertex(center_vertex)) {
valid_pixel.x = center_x;
valid_pixel.y = center_y;
}
//Return it
return valid_pixel;
}
__global__ void chooseValidPixelKernel(
DeviceArrayView<ushort4> candidate_pixel_pairs,
cudaTextureObject_t depth_vertex_map,
cudaTextureObject_t index_map,
unsigned rows, unsigned cols,
unsigned* valid_indicator,
ushort4* valid_pixel_pairs
) {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < candidate_pixel_pairs.Size())
{
const auto candidate_pair = candidate_pixel_pairs[idx];
const auto geometry_pixel = validGeometryPixelInWindow(index_map, candidate_pair.x, candidate_pair.y);
const auto depth_pixel = validDepthPixelInWindow(depth_vertex_map, candidate_pair.z, candidate_pair.w);
if(geometry_pixel.x < cols && geometry_pixel.y < rows && depth_pixel.x < cols && depth_pixel.y < rows) {
valid_indicator[idx] = 1;
valid_pixel_pairs[idx] = make_ushort4(geometry_pixel.x, geometry_pixel.y, depth_pixel.x, depth_pixel.y);
}
else {
valid_indicator[idx] = 0;
}
}
}
__global__ void compactQueryValidPairsKernel(
cudaTextureObject_t depth_vertex_map,
cudaTextureObject_t reference_vertex_map,
const DeviceArrayView2D<KNNAndWeight> knn_map,
const DeviceArrayView<unsigned> valid_indicator,
const unsigned* prefixsum_indicator,
const ushort4* valid_pixel_pairs,
const mat34 camera2world,
float4* target_vertex_array,
float4* reference_vertex_array,
ushort4* knn_array,
float4* knn_weight_array
) {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx >= valid_indicator.Size()) return;
if(valid_indicator[idx] != 0) {
const auto offset = prefixsum_indicator[idx] - 1;
const auto pixel_pair = valid_pixel_pairs[idx];
const float4 reference_vertex = tex2D<float4>(reference_vertex_map, pixel_pair.x, pixel_pair.y);
const float4 depth_vertex = tex2D<float4>(depth_vertex_map, pixel_pair.z, pixel_pair.w);
const auto knn = knn_map(pixel_pair.y, pixel_pair.x); //KNN must be valid
//Compute the target vertex
const float3 depth_v3 = make_float3(depth_vertex.x, depth_vertex.y, depth_vertex.z);
const float3 target_v3 = camera2world.rot * depth_v3 + camera2world.trans;
//Write to output
target_vertex_array[offset] = make_float4(target_v3.x, target_v3.y, target_v3.z, 1.0f);
reference_vertex_array[offset] = reference_vertex;
knn_array[offset] = knn.knn;
knn_weight_array[offset] = knn.weight;
}
}
//Forward warp the vertex for better computation of jacobian
__global__ void forwardWarpFeatureVertexKernel(
DeviceArrayView<float4> reference_vertex_array,
const ushort4* vertex_knn_array,
const float4* vertex_knnweight_array,
const DualQuaternion* node_se3,
float4* warped_vertex_array
) {
const auto idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < reference_vertex_array.Size()) {
const float4 reference_vertex = reference_vertex_array[idx];
const ushort4 knn = vertex_knn_array[idx];
const float4 knnweight = vertex_knnweight_array[idx];
DualQuaternion dq = averageDualQuaternion(node_se3, knn, knnweight);
const mat34 se3 = dq.se3_matrix();
const float3 warped_vertex = se3.rot * reference_vertex + se3.trans;
warped_vertex_array[idx] = make_float4(warped_vertex.x, warped_vertex.y, warped_vertex.z, 1.0f);
}
}
} // namespace device
} // namespace surfelwarp
void surfelwarp::SparseCorrespondenceHandler::ChooseValidPixelPairs(cudaStream_t stream) {
m_valid_pixel_indicator.ResizeArrayOrException(m_observations.correspond_pixel_pairs.Size());
m_corrected_pixel_pairs.ResizeArrayOrException(m_observations.correspond_pixel_pairs.Size());
//The correspondence array might be empty
if(m_valid_pixel_indicator.ArraySize() == 0) return;
dim3 blk(64);
dim3 grid(divUp(m_valid_pixel_indicator.ArraySize(), blk.x));
const auto rows = m_geometry_maps.knn_map.Rows();
const auto cols = m_geometry_maps.knn_map.Cols();
device::chooseValidPixelKernel<<<grid, blk, 0, stream>>>(
m_observations.correspond_pixel_pairs,
m_observations.depth_vertex_map,
m_geometry_maps.index_map,
rows, cols,
m_valid_pixel_indicator,
m_corrected_pixel_pairs
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::SparseCorrespondenceHandler::CompactQueryPixelPairs(cudaStream_t stream) {
//The correspondence array might be empty
if(m_valid_pixel_indicator.ArraySize() == 0) return;
//Inclusive sum
m_valid_pixel_prefixsum.InclusiveSum(m_valid_pixel_indicator.ArrayView(), stream);
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
//Choose it
dim3 blk(64);
dim3 grid(divUp(m_valid_pixel_indicator.ArraySize(), blk.x));
device::compactQueryValidPairsKernel<<<grid, blk, 0, stream>>>(
m_observations.depth_vertex_map,
m_geometry_maps.reference_vertex_map,
m_geometry_maps.knn_map,
//Prefix-sum information
m_valid_pixel_indicator.ArrayView(),
m_valid_pixel_prefixsum.valid_prefixsum_array.ptr(),
m_corrected_pixel_pairs.Ptr(),
m_camera2world,
//The output
m_valid_target_vertex.Ptr(),
m_valid_reference_vertex.Ptr(),
m_valid_vertex_knn.Ptr(),
m_valid_knn_weight.Ptr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::SparseCorrespondenceHandler::QueryCompactedArraySize(cudaStream_t stream) {
//The correspondence array might be empty
if(m_valid_pixel_indicator.ArraySize() == 0) {
m_valid_target_vertex.ResizeArrayOrException(0);
m_valid_reference_vertex.ResizeArrayOrException(0);
m_valid_vertex_knn.ResizeArrayOrException(0);
m_valid_knn_weight.ResizeArrayOrException(0);
return;
}
//Non-empty array
//unsigned valid_array_size;
cudaSafeCall(cudaMemcpyAsync(
m_correspondence_array_size,
m_valid_pixel_prefixsum.valid_prefixsum_array.ptr() + m_valid_pixel_prefixsum.valid_prefixsum_array.size() - 1,
sizeof(unsigned),
cudaMemcpyDeviceToHost,
stream
));
//Sync before use
cudaSafeCall(cudaStreamSynchronize(stream));
//LOG(INFO) << "The number of valid pixel pairs is " << valid_array_size;
//Correct the size
m_valid_target_vertex.ResizeArrayOrException(*m_correspondence_array_size);
m_valid_reference_vertex.ResizeArrayOrException(*m_correspondence_array_size);
m_valid_vertex_knn.ResizeArrayOrException(*m_correspondence_array_size);
m_valid_knn_weight.ResizeArrayOrException(*m_correspondence_array_size);
}
/* The method to build the term 2 jacobian map
*/
void surfelwarp::SparseCorrespondenceHandler::forwardWarpFeatureVertex(cudaStream_t stream) {
if(m_valid_reference_vertex.ArraySize() == 0)
return;
//Correct the size
m_valid_warped_vertex.ResizeArrayOrException(m_valid_reference_vertex.ArraySize());
//Do a forward warp
dim3 blk(128);
dim3 grid(divUp(m_valid_reference_vertex.ArraySize(), blk.x));
device::forwardWarpFeatureVertexKernel<<<grid, blk, 0, stream>>>(
m_valid_reference_vertex.ArrayView(),
m_valid_vertex_knn.Ptr(), m_valid_knn_weight.Ptr(),
m_node_se3.RawPtr(),
m_valid_warped_vertex.Ptr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::SparseCorrespondenceHandler::BuildTerm2Jacobian(cudaStream_t stream) {
forwardWarpFeatureVertex(stream);
}
surfelwarp::Point2PointICPTerm2Jacobian surfelwarp::SparseCorrespondenceHandler::Term2JacobianMap() const {
Point2PointICPTerm2Jacobian term2jacobian;
term2jacobian.target_vertex = m_valid_target_vertex.ArrayView();
term2jacobian.reference_vertex = m_valid_reference_vertex.ArrayView();
term2jacobian.knn = m_valid_vertex_knn.ArrayView();
term2jacobian.knn_weight = m_valid_knn_weight.ArrayView();
term2jacobian.node_se3 = m_node_se3;
term2jacobian.warped_vertex = m_valid_warped_vertex.ArrayView();
//Check the size
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.reference_vertex.Size());
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.knn.Size());
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.knn_weight.Size());
SURFELWARP_CHECK_EQ(term2jacobian.target_vertex.Size(), term2jacobian.warped_vertex.Size());
//Return it
return term2jacobian;
}
|
285e54b58bb19873a152cfaaf6a0480929df8149.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// k2/csrc/cuda/tensor_ops.cu
// Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey
// Fangjun Kuang,
// Haowen Qiu)
// See ../../LICENSE for clarification regarding multiple authors
#include "k2/csrc/tensor_ops.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "k2/csrc/dtype.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/nvtx.h"
namespace k2 {
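// Copies a 2-D strided tensor element by element: dest[i, j] = src[i, j], with each tensor
// addressed through its own (stride0, stride1) pair; used by CopyTensorElements below for
// both the plain CPU loop and the K2_EVAL2 device path.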
template <typename T>
static void CopyTensorElements2d(ContextPtr c, int32_t dim0, int32_t dim1,
const T *src_data, int32_t src_stride0,
int32_t src_stride1, T *dest_data,
int32_t dest_stride0, int32_t dest_stride1) {
NVTX_RANGE(K2_FUNC);
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
// this is just an optimization, the other branch would work for CPU too.
for (int32_t i = 0; i < dim0; i++) {
for (int32_t j = 0; j < dim1; j++) {
dest_data[i * dest_stride0 + j * dest_stride1] =
src_data[i * src_stride0 + j * src_stride1];
}
}
} else {
K2_EVAL2(
c, dim0, dim1, lambda_set_elems, (int32_t i, int32_t j)->void {
dest_data[i * dest_stride0 + j * dest_stride1] =
src_data[i * src_stride0 + j * src_stride1];
});
}
}
template <typename T>
void CopyTensorElements1d(ContextPtr c, int32_t dim, const T *src_data,
int32_t src_stride, T *dest_data,
int32_t dest_stride) {
NVTX_RANGE(K2_FUNC);
K2_EVAL(
c, dim, lambda_set_elems, (int32_t i)->void {
dest_data[i * dest_stride] = src_data[i * src_stride];
});
}
// TODO(dan): this is far from ideal in terms of efficiency. There is no
// attempt to discover the simplest pattern that covers the copy, or to be smart
// about memory loads if it turns out to be a transposition.
void CopyTensorElements(Tensor src, Tensor dest) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(src.SameDims(dest));
ContextPtr c = GetContext(src, dest);
int32_t num_axes = src.NumAxes();
if (num_axes > 2) {
// For now, only directly support copies of at most 2 dims.
int32_t leading_dim = src.Dim(0);
ParallelRunner pr(c);
for (int32_t i = 0; i < leading_dim; i++) {
With w(pr.NewStream());  // keep the stream guard alive for this loop iteration
Tensor src_part = src.Index(0, i), dest_part = dest.Index(0, i);
CopyTensorElements(src_part, dest_part);
}
} else {
const Shape &src_shape = src.GetShape(), &dest_shape = dest.GetShape();
int32_t src_stride0 = (num_axes > 0 ? src_shape.Stride(0) : 0),
dest_stride0 = (num_axes > 0 ? dest_shape.Stride(0) : 0),
dim0 = (num_axes > 0 ? src_shape.Dim(0) : 1);
Dtype dtype = src.GetDtype();
K2_CHECK(dtype == dest.GetDtype());
int32_t num_axes = src.NumAxes();
if (num_axes == 2) {
int32_t src_stride1 = src_shape.Stride(1),
dest_stride1 = dest_shape.Stride(1), dim1 = src_shape.Dim(1);
FOR_ALL_DTYPES(dtype, T,
CopyTensorElements2d<T>(
c, dim0, dim1, src.Data<T>(), src_stride0, src_stride1,
dest.Data<T>(), dest_stride0, dest_stride1));
} else {
FOR_ALL_DTYPES(
dtype, T,
CopyTensorElements1d<T>(c, dim0, src.Data<T>(), src_stride0,
dest.Data<T>(), dest_stride0));
}
}
}
Tensor ToContiguous(const Tensor &src) {
// things like this would be more efficient if we supported something like
// PyTorch's ArrayRef. not so critical to address that now though.
Tensor ans(src.Context(), src.GetDtype(), src.GetShape().Dims());
CopyTensorElements(src, ans);
return ans;
}
template <typename T, typename U>
void CastTensorElements1dContiguous(ContextPtr c, int32_t dim,
const T *src_data, U *dest_data) {
NVTX_RANGE(K2_FUNC);
K2_EVAL(
c, dim, lambda_cast_elems,
(int32_t i)->void { dest_data[i] = static_cast<U>(src_data[i]); });
}
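// Returns a new contiguous tensor whose elements are src's elements converted to new_dtype
// via static_cast; src is first made contiguous if it is not already.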
Tensor Cast(Tensor src, Dtype new_dtype) {
NVTX_RANGE(K2_FUNC);
if (!src.IsContiguous()) src = ToContiguous(src);
ContextPtr c = src.Context();
Tensor ans(c, new_dtype, src.GetShape());
K2_DCHECK(ans.IsContiguous());
Dtype old_dtype = src.GetDtype();
int32_t dim = ans.NumElements();
FOR_ALL_DTYPES(old_dtype, T,
FOR_ALL_DTYPES(new_dtype, U,
CastTensorElements1dContiguous<T, U>(
c, dim, src.Data<T>(), ans.Data<U>())));
return ans;
}
// See the documentation of `Index`.
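// In short: ans[i] = src[indexes[i]]; when allow_minus_one is true, an index of -1 fills in
// default_value instead of reading src.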
template <typename T>
static void Index1DImpl(ContextPtr context, const T *src_data,
int32_t src_stride, int32_t src_dim,
const int32_t *indexes_data, bool allow_minus_one,
int32_t ans_dim, T *ans_data, double default_value) {
if (std::is_integral<T>::value) {
K2_CHECK_EQ(static_cast<T>(default_value), default_value);
}
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
K2_EVAL(
context, ans_dim, lambda_set_values, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim);
K2_DCHECK(index >= 0 || index == -1);
T value =
(index < 0 ? T(default_value) : src_data[index * src_stride]);
ans_data[i] = value;
});
return;
}
// now handle the case allow_minus_one == false
K2_EVAL(
context, ans_dim, lambda_set_values, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim);
K2_DCHECK_GE(index, 0);
ans_data[i] = src_data[index * src_stride];
});
}
// See the documentation of `Index`.
template <typename T>
static void Index2DImpl(ContextPtr context, const T *src_data,
int32_t src_stride, int32_t src_dim0, int32_t src_dim1,
const int32_t *indexes_data, bool allow_minus_one,
int32_t ans_dim, int32_t ans_stride, T *ans_data) {
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
if (context->GetDeviceType() == kCpu) {
for (int32_t i = 0; i != ans_dim; ++i) {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, -1);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
if (index == -1) {
memset(cur_ans_data, 0, src_dim1 * sizeof(T));
} else {
memcpy(cur_ans_data, cur_src_data, src_dim1 * sizeof(T));
}
}
return;
}
// now for CUDA
auto lambda_set = [=] __device__(int32_t i, int32_t j) -> void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, -1);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
if (index == -1)
cur_ans_data[j] = 0;
else
cur_ans_data[j] = cur_src_data[j];
};
Eval2Device(context, ans_dim, src_dim1, lambda_set);
return;
}
// now handle the case when allow_minus_one is false
if (context->GetDeviceType() == kCpu) {
for (int32_t i = 0; i != ans_dim; ++i) {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, 0);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
memcpy(cur_ans_data, cur_src_data, src_dim1 * sizeof(T));
}
return;
}
// now for CUDA
auto lambda_set = [=] __device__(int32_t i, int32_t j) -> void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, 0);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
cur_ans_data[j] = cur_src_data[j];
};
Eval2Device(context, ans_dim, src_dim1, lambda_set);
}
// See the documentation for `Index`.
// This function is for 1-D tensors.
static Tensor Index1D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one, double default_value) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 1);
K2_CHECK(IsCompatible(src, indexes));
Dtype dtype = src.GetDtype();
ContextPtr &context = src.Context();
Tensor ans(context, dtype, {indexes.Dim()});
K2_CHECK(ans.IsContiguous());
int32_t src_stride = src.Stride(0);
const int32_t *indexes_data = indexes.Data();
int32_t src_dim = src.Dim(0);
int32_t ans_dim = ans.Dim(0);
FOR_ALL_DTYPES(
dtype, T,
Index1DImpl<T>(context, src.Data<T>(), src_stride, src_dim, indexes_data,
allow_minus_one, ans_dim, ans.Data<T>(), default_value));
return ans;
}
// See the documentation for `Index`.
// This function is for 2-D tensors.
static Tensor Index2D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK(IsCompatible(src, indexes));
Dtype dtype = src.GetDtype();
ContextPtr &context = src.Context();
Tensor ans(context, dtype, {indexes.Dim(), src.Dim(1)});
K2_CHECK(ans.IsContiguous());
int32_t src_stride = src.Stride(0);
K2_CHECK_EQ(src.Stride(1), 1);
const int32_t *indexes_data = indexes.Data();
int32_t src_dim0 = src.Dim(0);
int32_t src_dim1 = src.Dim(1);
int32_t ans_dim = ans.Dim(0);
int32_t ans_stride = ans.Stride(0);
FOR_ALL_DTYPES(dtype, T,
Index2DImpl<T>(context, src.Data<T>(), src_stride, src_dim0,
src_dim1, indexes_data, allow_minus_one,
ans_dim, ans_stride, ans.Data<T>()));
return ans;
}
Tensor Index(Tensor &src, Array1<int32_t> &indexes, bool allow_minus_one,
double default_value) {
switch (src.NumAxes()) {
case 1:
return Index1D(src, indexes, allow_minus_one, default_value);
case 2:
return Index2D(src, indexes, allow_minus_one);
default:
K2_LOG(FATAL) << "Unsupported number of axes: " << src.NumAxes()
<< "\n. Only 1-D and 2-D tensors are supported.";
return src; // prevent compiler warnings
}
}
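// Scatter-add for 1-D tensors: dest[indexes[i]] += src[i], using AtomicAdd so that duplicate
// indexes accumulate correctly; when allow_minus_one is true, entries whose index is -1 are
// skipped.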
template <typename T>
static void IndexAdd1DImpl(ContextPtr context, const T *src_data,
int32_t src_dim, int32_t src_stride,
const int32_t *indexes_data, bool allow_minus_one,
int32_t dest_dim, int32_t dest_stride,
T *dest_data) {
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
K2_EVAL(
context, src_dim, lambda_add, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, -1);
if (index != -1)
AtomicAdd(dest_data + index * dest_stride,
src_data[i * src_stride]);
});
return;
}
// handle the case: allow_minus_one == false
K2_EVAL(
context, src_dim, lambda_add, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, 0);
AtomicAdd(dest_data + index * dest_stride, src_data[i * src_stride]);
});
}
template <typename T>
static void IndexAdd2DImpl(ContextPtr context, const T *src_data,
int32_t src_dim0, int32_t src_dim1,
int32_t src_stride0, int32_t src_stride1,
const int32_t *indexes_data, bool allow_minus_one,
int32_t dest_dim, int32_t dest_stride0,
int32_t dest_stride1, T *dest_data) {
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
K2_EVAL2(
context, src_dim0, src_dim1, lambda_add, (int32_t i, int32_t j)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, -1);
if (index != -1)
AtomicAdd(dest_data + index * dest_stride0 + j * dest_stride1,
src_data[i * src_stride0 + j * src_stride1]);
});
return;
}
K2_EVAL2(
context, src_dim0, src_dim1, lambda_add, (int32_t i, int32_t j)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, 0);
AtomicAdd(dest_data + index * dest_stride0 + j * dest_stride1,
src_data[i * src_stride0 + j * src_stride1]);
});
}
static void IndexAdd1D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one, Tensor *dest) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 1);
K2_CHECK_NE(dest, nullptr);
K2_CHECK_EQ(dest->NumAxes(), 1);
ContextPtr context = GetContext(src, indexes, *dest);
Dtype dtype = src.GetDtype();
const int32_t *indexes_data = indexes.Data();
int32_t src_dim = src.Dim(0);
K2_CHECK_EQ(src_dim, indexes.Dim());
int32_t src_stride = src.Stride(0);
int32_t dest_dim = dest->Dim(0);
int32_t dest_stride = dest->Stride(0);
// atomicAdd is not available for some types, e.g., int8_t and int16_t
// see
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicadd
FOR_REAL_AND_INT32_TYPES(
dtype, T,
IndexAdd1DImpl<T>(context, src.Data<T>(), src_dim, src_stride,
indexes_data, allow_minus_one, dest_dim, dest_stride,
dest->Data<T>()));
}
static void IndexAdd2D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one, Tensor *dest) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_NE(dest, nullptr);
K2_CHECK_EQ(dest->NumAxes(), 2);
K2_CHECK_EQ(dest->Dim(1), src.Dim(1));
ContextPtr context = GetContext(src, indexes, *dest);
Dtype dtype = src.GetDtype();
int32_t src_dim0 = src.Dim(0);
int32_t src_dim1 = src.Dim(1);
K2_CHECK_EQ(src_dim0, indexes.Dim());
int32_t src_stride0 = src.Stride(0);
int32_t src_stride1 = src.Stride(1);
int32_t dest_dim = dest->Dim(0);
int32_t dest_stride0 = dest->Stride(0);
int32_t dest_stride1 = dest->Stride(1);
const int32_t *indexes_data = indexes.Data();
FOR_REAL_AND_INT32_TYPES(
dtype, T,
IndexAdd2DImpl<T>(context, src.Data<T>(), src_dim0, src_dim1, src_stride0,
src_stride1, indexes_data, allow_minus_one, dest_dim,
dest_stride0, dest_stride1, dest->Data<T>()));
}
void IndexAdd(Tensor &src, Array1<int32_t> &indexes, bool allow_minus_one,
Tensor *dest) {
switch (src.NumAxes()) {
case 1:
IndexAdd1D(src, indexes, allow_minus_one, dest);
break;
case 2:
IndexAdd2D(src, indexes, allow_minus_one, dest);
break;
default:
K2_LOG(FATAL) << "Unsupported number of axes: " << src.NumAxes()
<< "\n. Only 1-D and 2-D tensors are supported.";
break;
}
}
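// SimpleRaggedIndexSelect1DImpl: for each sub-list of `indexes`, gathers src[index] over the
// sub-list and requires at most one non-zero among the gathered values; that value (or 0 for
// an all-zero sub-list) becomes the output element for the sub-list. Illustrative example
// (values made up for this comment): src = [0, 5, 0, 3], indexes = [[0, 1], [2], [0, 3]]
// gives ans = [5, 0, 3].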
template <typename T>
static void SimpleRaggedIndexSelect1DImpl(ContextPtr context, const T *src_data,
int32_t src_stride, int32_t src_dim,
Ragged<int32_t> &indexes,
int32_t ans_dim, T *ans_data) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(indexes.NumAxes(), 2);
int32_t indexes_dim0 = indexes.Dim0(),
indexes_num_elems = indexes.NumElements();
const int32_t *indexes_row_ids_data = indexes.RowIds(1).Data();
const int32_t *indexes_data = indexes.values.Data();
K2_CHECK_EQ(ans_dim, indexes_dim0);
K2_EVAL(
context, ans_dim, lambda_init_ans,
(int32_t i)->void { ans_data[i] = 0; });
Array1<int32_t> non_zero_indexes(context, ans_dim, -1);
int32_t *non_zero_indexes_data = non_zero_indexes.Data();
K2_EVAL(
context, indexes_num_elems, lambda_set_ans_data, (int32_t i)->void {
int32_t src_index = indexes_data[i];
K2_CHECK_GE(src_index, 0);
K2_CHECK_LT(src_index, src_dim);
T value = src_data[src_index * src_stride];
int32_t ans_index = indexes_row_ids_data[i];
if (value != 0) {
non_zero_indexes_data[ans_index] = i;
ans_data[ans_index] = value;
}
});
// check if there is at most one non-zero element in src for each sub-list
Array1<int32_t> status(context, 1, 0); // 0 -> success; otherwise 1 + row_id
// of bad row in `indexes`
int32_t *status_data = status.Data();
K2_EVAL(
context, indexes_num_elems, lambda_check_status, (int32_t i)->void {
int32_t src_index = indexes_data[i];
T value = src_data[src_index * src_stride];
int32_t ans_index = indexes_row_ids_data[i];
if (value != 0 && non_zero_indexes_data[ans_index] != i)
status_data[0] = 1 + ans_index;
});
int32_t s = status[0];
if (s != 0) {
Array1<T> indexed_values(context, indexes_num_elems);
T *indexed_values_data = indexed_values.Data();
K2_EVAL(
context, indexes_num_elems, lambda_set_values, (int32_t i)->void {
int32_t src_index = indexes_data[i];
indexed_values_data[i] = src_data[src_index * src_stride];
});
Array1<int32_t> row_splits = indexes.RowSplits(1);
K2_LOG(FATAL) << "There must be at most one non-zero "
"element in src for any sub-list in indexes; sub-list "
<< (s - 1) << " has too many elements: "
<< indexed_values.Arange(row_splits[s - 1], row_splits[s]);
}
}
Tensor SimpleRaggedIndexSelect1D(Tensor &src, Ragged<int32_t> &indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 1);
K2_CHECK(IsCompatible(src, indexes));
Dtype dtype = src.GetDtype();
ContextPtr &context = src.Context();
Tensor ans(context, dtype, {indexes.Dim0()});
K2_CHECK(ans.IsContiguous());
int32_t src_stride = src.Stride(0);
int32_t src_dim = src.Dim(0);
int32_t ans_dim = ans.Dim(0);
// Note below src.Data<T> will check if T is compatible with `dtype`.
FOR_ALL_DTYPES(dtype, T,
SimpleRaggedIndexSelect1DImpl<T>(context, src.Data<T>(),
src_stride, src_dim, indexes,
ans_dim, ans.Data<T>()));
return ans;
}
template <typename Real>
struct DiscountedCumSumElement {
Real y; // y is the partial sums of x values. Initially it is just a
// single x value. In general each x is multiplied by all
// previous gammas.
Real gamma; // gamma is the product of gammas along a range of elements
};
template <typename Real>
struct CombineCumSumOp {
__device__ DiscountedCumSumElement<Real> operator() (
DiscountedCumSumElement<Real> &a,
DiscountedCumSumElement<Real> &b) const {
return DiscountedCumSumElement<Real>{b.y + b.gamma * a.y,
a.gamma * b.gamma};
}
};
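// Why this operator realizes the discounted sum: an element {y, gamma} acts on a running
// prefix t as the affine map t -> y + gamma * t. Combining a (earlier) with b (later)
// composes the two maps: y_b + gamma_b * (y_a + gamma_a * t) = (y_b + gamma_b * y_a) +
// (gamma_a * gamma_b) * t, which is exactly what operator() returns. Composition of affine
// maps is associative, which is the property the BlockScan inclusive scan below relies on.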
// A stateful callback functor that maintains a running prefix to be applied
// during consecutive scan operations.
template <typename Real>
struct BlockPrefixCallbackOp {
using Elem = DiscountedCumSumElement<Real>;
Elem running_total;
// Constructor
__device__ BlockPrefixCallbackOp(): running_total{0.0, 0.0} { }
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ Elem operator()(Elem block_aggregate) {
Elem old_prefix = running_total;
running_total = CombineCumSumOp<Real>()(running_total, block_aggregate);
return old_prefix;
}
};
/*
Notes for DiscountedCumSum.
It implements a discounted sum along a sequence. Suppose we have x_i, gamma_i and
y_i, for 0 <= i < T. Then we do:
y_0 = x_0
y_i = x_i + y_{i-1} gamma_i
for 0 < i < T. (This is done as a generic inclusive-scan/inclusive-sum with a special
reduction op).
See DiscountedCumSumElement and CombineCumSumOp for how we use a special operator to
do this as an inclusive-sum.
The tensors involved must be 2-dimensional with dimensions (N, T) where N is
the batch size and T the time duration.
Each thread-block is of (x,y,z) size (ThreadsPerBlock,1,1), and it processes N
items. It processes ThreadsPerBlock items at a time; and if T >
ThreadsPerBlock it simply loops to cover the remaining items.
The grid size (x,y,z) is (X,Y,1) where the X and Y together cover the "N"
(batch) dimension. (We can't cover it just in the X dimension because of
limits on the size of each grid dimension).
@param [in] N The batch size, i.e. number of separate sequences. We expect
that N <= gridDim.x * gridDim.y.
@param [in] T The sequence length. There is no constraint on the sequence
length; the kernel deals with ThreadsPerBlock items at a time,
and takes care of T > ThreadsPerBlock by looping.
@param [in] x Pointer to the x input data, which is an array of shape (N,T)
@param [in] x_stride0 Stride along axis 0 of the `x` data
@param [in] gamma Pointer to the gamma input data, which is an array of shape (N,T)
@param [in] gamma_stride0 Stride along axis 0 of of the `gamma` data
@param [in] y Pointer to the y output data, which is an array of shape (N,T)
@param [in] y_stride0 Stride along axis 0 of the `y` data
@param [in] stride1 Stride along axis 1 of the three arrays (this is expected
to be identical, nonzero, and preferably -1 or 1.
*/
template <typename Real, int ThreadsPerBlock>
static __global__ void DiscountedCumSumKernel(int N, int T, const Real *x,
int x_stride0, const Real *gamma,
int gamma_stride0, Real *y,
int y_stride0, int stride1) {
int n_idx = blockIdx.y * gridDim.x + blockIdx.x;
if (n_idx >= N)
return;
x += x_stride0 * n_idx;
gamma += gamma_stride0 * n_idx;
y += y_stride0 * n_idx;
int thread_idx = threadIdx.x;
using Elem = DiscountedCumSumElement<Real>;
BlockPrefixCallbackOp<Real> prefix_callback;
typedef hipcub::BlockScan<Elem, ThreadsPerBlock> BlockScan;
// shared memory for BlockScan
__shared__ typename BlockScan::TempStorage temp_storage;
for (int base_t = 0; base_t < T; base_t += ThreadsPerBlock) {
Elem elem;
// Load x and gamma from memory. These reads will be coalesced (which is
// the advantage of having each thread process one element at this stage;
// although we spend more time with raking reduction than we really
// need to).
if (base_t + thread_idx < T) {
elem.y = x[(base_t + thread_idx) * stride1];
elem.gamma = gamma[(base_t + thread_idx) * stride1];
}
CombineCumSumOp<Real> op;
// The last arg is a callback functor: it receives the aggregate of this
// block and returns the running prefix that is combined into every element of the block.
BlockScan(temp_storage).InclusiveScan(elem, elem, op, prefix_callback);
__syncthreads();
if (base_t + thread_idx < T) y[(base_t + thread_idx) * stride1] = elem.y;
}
}
template <typename Real, int ThreadsPerBlock>
void DiscountedCumSumCudaImpl(hipStream_t stream,
int N, int T,
const Real *x, int x_stride0,
const Real *gamma, int gamma_stride0,
Real *y, int y_stride0, int stride1) {
int32_t tot_grid_size = N;
int32_t x_grid_size = (tot_grid_size < (1 << 20)
? std::min<int32_t>(tot_grid_size, (1 << 10))
: 32768),
y_grid_size = NumBlocks(tot_grid_size, x_grid_size);
dim3 grid_dim(x_grid_size, y_grid_size, 1),
block_dim(ThreadsPerBlock, 1, 1);
K2_CUDA_SAFE_CALL(
hipLaunchKernelGGL(( DiscountedCumSumKernel<Real, ThreadsPerBlock>)
, dim3(grid_dim), dim3(block_dim), 0, stream,
N, T, x, x_stride0, gamma, gamma_stride0, y, y_stride0, stride1));
}
template <typename Real>
static void DiscountedCumSumCpuImpl(int N, int T,
const Real *x, int x_stride0,
const Real *gamma, int gamma_stride0,
Real *y, int y_stride0,
int stride1) {
for (int32_t n = 0; n < N; n++,
x += x_stride0, gamma += gamma_stride0, y += y_stride0) {
Real cur_sum = 0.0;
for (int32_t t = 0; t < T; t++) {
cur_sum = x[t * stride1] + cur_sum * gamma[t * stride1];
y[t * stride1] = cur_sum;
}
}
}
void DiscountedCumSum(const Tensor &src, const Tensor &gamma, Tensor *dest) {
// check contexts compatible:
if (!(IsCompatible(src, gamma) && IsCompatible(src, *dest))) {
K2_LOG(FATAL) << "Tensors are on different devices";
}
if (!(src.NumAxes() == 2 && gamma.NumAxes() == 2 && dest->NumAxes() == 2)) {
K2_LOG(FATAL) << "Expected all num-axes to equal 2.";
}
if (!(src.SameDims(gamma) && src.SameDims(*dest))) {
K2_LOG(FATAL) << "Expected all args to have the same dim.";
}
if (!(src.Stride(1) == gamma.Stride(1) && src.Stride(1) == dest->Stride(1))) {
K2_LOG(FATAL) << "Expected all strides on dim 1 to be the same.";
}
if (!(src.GetDtype() == gamma.GetDtype() &&
src.GetDtype() == dest->GetDtype())) {
K2_LOG(FATAL) << "Expected all args to have the same dtype.";
}
int32_t N = src.Dim(0),
T = src.Dim(1),
src_stride0 = src.Stride(0),
gamma_stride0 = gamma.Stride(0),
dest_stride0 = dest->Stride(0),
stride1 = src.Stride(1); // these are all the same.
ContextPtr c = src.Context();
if (src.GetDtype() == kFloatDtype) {
if (c->GetDeviceType() == kCuda) {
DiscountedCumSumCudaImpl<float, 128>(c->GetCudaStream(), N, T,
src.Data<float>(), src_stride0,
gamma.Data<float>(), gamma_stride0,
dest->Data<float>(), dest_stride0,
stride1);
} else {
DiscountedCumSumCpuImpl<float>(N, T,
src.Data<float>(), src_stride0,
gamma.Data<float>(), gamma_stride0,
dest->Data<float>(), dest_stride0,
stride1);
}
} else if (src.GetDtype() == kDoubleDtype) {
if (c->GetDeviceType() == kCuda) {
DiscountedCumSumCudaImpl<double, 128>(c->GetCudaStream(), N, T,
src.Data<double>(), src_stride0,
gamma.Data<double>(), gamma_stride0,
dest->Data<double>(), dest_stride0,
stride1);
} else {
DiscountedCumSumCpuImpl<double>(N, T,
src.Data<double>(), src_stride0,
gamma.Data<double>(), gamma_stride0,
dest->Data<double>(), dest_stride0,
stride1);
}
} else {
K2_LOG(FATAL)
<< "This algorithm only instantiated for float and double; type is "
<< TraitsOf(src.GetDtype()).Name();
}
}
Tensor Flip(Tensor &src, int32_t axis) {
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(axis, -num_axes);
K2_CHECK_LT(axis, num_axes);
if (axis < 0)
axis += num_axes;
int32_t old_dim = src.Dim(axis);
if (old_dim <= 1)
return src; // No point copying it, it's a no-op.
TensorImplPtr src_impl = src.Impl(),
ans_impl = std::make_shared<TensorImpl>(*src_impl);
int32_t old_stride = ans_impl->shape.Stride(axis);
ans_impl->shape.SetStride(axis, -old_stride);
int64_t byte_offset = old_stride * static_cast<int64_t>(old_dim - 1) *
TraitsOf(ans_impl->dtype).NumBytes();
ans_impl->byte_offset += byte_offset;
return Tensor(ans_impl);
}
} // namespace k2
| 285e54b58bb19873a152cfaaf6a0480929df8149.cu | // k2/csrc/cuda/tensor_ops.cu
// Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey
// Fangjun Kuang,
// Haowen Qiu)
// See ../../LICENSE for clarification regarding multiple authors
#include "k2/csrc/tensor_ops.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "k2/csrc/dtype.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/nvtx.h"
namespace k2 {
template <typename T>
static void CopyTensorElements2d(ContextPtr c, int32_t dim0, int32_t dim1,
const T *src_data, int32_t src_stride0,
int32_t src_stride1, T *dest_data,
int32_t dest_stride0, int32_t dest_stride1) {
NVTX_RANGE(K2_FUNC);
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
// this is just an optimization, the other branch would work for CPU too.
for (int32_t i = 0; i < dim0; i++) {
for (int32_t j = 0; j < dim1; j++) {
dest_data[i * dest_stride0 + j * dest_stride1] =
src_data[i * src_stride0 + j * src_stride1];
}
}
} else {
K2_EVAL2(
c, dim0, dim1, lambda_set_elems, (int32_t i, int32_t j)->void {
dest_data[i * dest_stride0 + j * dest_stride1] =
src_data[i * src_stride0 + j * src_stride1];
});
}
}
template <typename T>
void CopyTensorElements1d(ContextPtr c, int32_t dim, const T *src_data,
int32_t src_stride, T *dest_data,
int32_t dest_stride) {
NVTX_RANGE(K2_FUNC);
K2_EVAL(
c, dim, lambda_set_elems, (int32_t i)->void {
dest_data[i * dest_stride] = src_data[i * src_stride];
});
}
// TODO(dan): this is far from ideal in terms of efficiency. There is no
// attempt to discover the simplest pattern that covers the copy, or to be smart
// about memory loads if it turns out to be a transposition.
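// Illustrative behaviour (assumed shapes, not from the original comment):
// copying a (4, 5, 6) tensor takes the num_axes > 2 branch below and issues
// four independent (5, 6) copies, one per index along axis 0, each on its
// own stream obtained from the ParallelRunner.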
void CopyTensorElements(Tensor src, Tensor dest) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(src.SameDims(dest));
ContextPtr c = GetContext(src, dest);
int32_t num_axes = src.NumAxes();
if (num_axes > 2) {
// For now, only directly support copies of at most 2 dims.
int32_t leading_dim = src.Dim(0);
ParallelRunner pr(c);
for (int32_t i = 0; i < leading_dim; i++) {
With(pr.NewStream());
Tensor src_part = src.Index(0, i), dest_part = dest.Index(0, i);
CopyTensorElements(src_part, dest_part);
}
} else {
const Shape &src_shape = src.GetShape(), &dest_shape = dest.GetShape();
int32_t src_stride0 = (num_axes > 0 ? src_shape.Stride(0) : 0),
dest_stride0 = (num_axes > 0 ? dest_shape.Stride(0) : 0),
dim0 = (num_axes > 0 ? src_shape.Dim(0) : 1);
Dtype dtype = src.GetDtype();
K2_CHECK(dtype == dest.GetDtype());
int32_t num_axes = src.NumAxes();
if (num_axes == 2) {
int32_t src_stride1 = src_shape.Stride(1),
dest_stride1 = dest_shape.Stride(1), dim1 = src_shape.Dim(1);
FOR_ALL_DTYPES(dtype, T,
CopyTensorElements2d<T>(
c, dim0, dim1, src.Data<T>(), src_stride0, src_stride1,
dest.Data<T>(), dest_stride0, dest_stride1));
} else {
FOR_ALL_DTYPES(
dtype, T,
CopyTensorElements1d<T>(c, dim0, src.Data<T>(), src_stride0,
dest.Data<T>(), dest_stride0));
}
}
}
Tensor ToContiguous(const Tensor &src) {
// things like this would be more efficient if we supported something like
// PyTorch's ArrayRef. Not so critical to address that now, though.
Tensor ans(src.Context(), src.GetDtype(), src.GetShape().Dims());
CopyTensorElements(src, ans);
return ans;
}
template <typename T, typename U>
void CastTensorElements1dContiguous(ContextPtr c, int32_t dim,
const T *src_data, U *dest_data) {
NVTX_RANGE(K2_FUNC);
K2_EVAL(
c, dim, lambda_cast_elems,
(int32_t i)->void { dest_data[i] = static_cast<U>(src_data[i]); });
}
Tensor Cast(Tensor src, Dtype new_dtype) {
NVTX_RANGE(K2_FUNC);
if (!src.IsContiguous()) src = ToContiguous(src);
ContextPtr c = src.Context();
Tensor ans(c, new_dtype, src.GetShape());
K2_DCHECK(ans.IsContiguous());
Dtype old_dtype = src.GetDtype();
int32_t dim = ans.NumElements();
FOR_ALL_DTYPES(old_dtype, T,
FOR_ALL_DTYPES(new_dtype, U,
CastTensorElements1dContiguous<T, U>(
c, dim, src.Data<T>(), ans.Data<U>())));
return ans;
}
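// Illustrative example (hypothetical values): casting a float tensor holding
// [1.7, -2.3] to a 32-bit integer dtype yields [1, -2]; the per-element
// static_cast above truncates toward zero.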
// See the documentation of `Index`.
template <typename T>
static void Index1DImpl(ContextPtr context, const T *src_data,
int32_t src_stride, int32_t src_dim,
const int32_t *indexes_data, bool allow_minus_one,
int32_t ans_dim, T *ans_data, double default_value) {
if (std::is_integral<T>::value) {
K2_CHECK_EQ(static_cast<T>(default_value), default_value);
}
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
K2_EVAL(
context, ans_dim, lambda_set_values, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim);
K2_DCHECK(index >= 0 || index == -1);
T value =
(index < 0 ? T(default_value) : src_data[index * src_stride]);
ans_data[i] = value;
});
return;
}
// now handle the case allow_minus_one == false
K2_EVAL(
context, ans_dim, lambda_set_values, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim);
K2_DCHECK_GE(index, 0);
ans_data[i] = src_data[index * src_stride];
});
}
// See the documentation of `Index`.
template <typename T>
static void Index2DImpl(ContextPtr context, const T *src_data,
int32_t src_stride, int32_t src_dim0, int32_t src_dim1,
const int32_t *indexes_data, bool allow_minus_one,
int32_t ans_dim, int32_t ans_stride, T *ans_data) {
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
if (context->GetDeviceType() == kCpu) {
for (int32_t i = 0; i != ans_dim; ++i) {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, -1);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
if (index == -1) {
memset(cur_ans_data, 0, src_dim1 * sizeof(T));
} else {
memcpy(cur_ans_data, cur_src_data, src_dim1 * sizeof(T));
}
}
return;
}
// now for CUDA
auto lambda_set = [=] __device__(int32_t i, int32_t j) -> void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, -1);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
if (index == -1)
cur_ans_data[j] = 0;
else
cur_ans_data[j] = cur_src_data[j];
};
Eval2Device(context, ans_dim, src_dim1, lambda_set);
return;
}
// now handle the case when allow_minus_one is false
if (context->GetDeviceType() == kCpu) {
for (int32_t i = 0; i != ans_dim; ++i) {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, 0);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
memcpy(cur_ans_data, cur_src_data, src_dim1 * sizeof(T));
}
return;
}
// now for CUDA
auto lambda_set = [=] __device__(int32_t i, int32_t j) -> void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, src_dim0);
K2_DCHECK_GE(index, 0);
T *cur_ans_data = ans_data + i * ans_stride;
const T *cur_src_data = src_data + index * src_stride;
cur_ans_data[j] = cur_src_data[j];
};
Eval2Device(context, ans_dim, src_dim1, lambda_set);
}
// See the documentation for `Index`.
// This function is for 1-D tensors.
static Tensor Index1D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one, double default_value) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 1);
K2_CHECK(IsCompatible(src, indexes));
Dtype dtype = src.GetDtype();
ContextPtr &context = src.Context();
Tensor ans(context, dtype, {indexes.Dim()});
K2_CHECK(ans.IsContiguous());
int32_t src_stride = src.Stride(0);
const int32_t *indexes_data = indexes.Data();
int32_t src_dim = src.Dim(0);
int32_t ans_dim = ans.Dim(0);
FOR_ALL_DTYPES(
dtype, T,
Index1DImpl<T>(context, src.Data<T>(), src_stride, src_dim, indexes_data,
allow_minus_one, ans_dim, ans.Data<T>(), default_value));
return ans;
}
// See the documentation for `Index`.
// This function is for 2-D tensors.
static Tensor Index2D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK(IsCompatible(src, indexes));
Dtype dtype = src.GetDtype();
ContextPtr &context = src.Context();
Tensor ans(context, dtype, {indexes.Dim(), src.Dim(1)});
K2_CHECK(ans.IsContiguous());
int32_t src_stride = src.Stride(0);
K2_CHECK_EQ(src.Stride(1), 1);
const int32_t *indexes_data = indexes.Data();
int32_t src_dim0 = src.Dim(0);
int32_t src_dim1 = src.Dim(1);
int32_t ans_dim = ans.Dim(0);
int32_t ans_stride = ans.Stride(0);
FOR_ALL_DTYPES(dtype, T,
Index2DImpl<T>(context, src.Data<T>(), src_stride, src_dim0,
src_dim1, indexes_data, allow_minus_one,
ans_dim, ans_stride, ans.Data<T>()));
return ans;
}
Tensor Index(Tensor &src, Array1<int32_t> &indexes, bool allow_minus_one,
double default_value) {
switch (src.NumAxes()) {
case 1:
return Index1D(src, indexes, allow_minus_one, default_value);
case 2:
return Index2D(src, indexes, allow_minus_one);
default:
K2_LOG(FATAL) << "Unsupported number of axes: " << src.NumAxes()
<< "\n. Only 1-D and 2-D tensors are supported.";
return src; // prevent compiler warnings
}
}
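// Illustrative example (hypothetical values): for a 1-D src = [10, 20, 30],
// indexes = [2, -1, 0], allow_minus_one = true and default_value = 0, the
// result is [30, 0, 10]; with allow_minus_one = false the -1 entry would
// trip the K2_DCHECK_GE(index, 0) check instead.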
template <typename T>
static void IndexAdd1DImpl(ContextPtr context, const T *src_data,
int32_t src_dim, int32_t src_stride,
const int32_t *indexes_data, bool allow_minus_one,
int32_t dest_dim, int32_t dest_stride,
T *dest_data) {
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
K2_EVAL(
context, src_dim, lambda_add, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, -1);
if (index != -1)
AtomicAdd(dest_data + index * dest_stride,
src_data[i * src_stride]);
});
return;
}
// handle the case: allow_minus_one == false
K2_EVAL(
context, src_dim, lambda_add, (int32_t i)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, 0);
AtomicAdd(dest_data + index * dest_stride, src_data[i * src_stride]);
});
}
template <typename T>
static void IndexAdd2DImpl(ContextPtr context, const T *src_data,
int32_t src_dim0, int32_t src_dim1,
int32_t src_stride0, int32_t src_stride1,
const int32_t *indexes_data, bool allow_minus_one,
int32_t dest_dim, int32_t dest_stride0,
int32_t dest_stride1, T *dest_data) {
NVTX_RANGE(K2_FUNC);
if (allow_minus_one) {
K2_EVAL2(
context, src_dim0, src_dim1, lambda_add, (int32_t i, int32_t j)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, -1);
if (index != -1)
AtomicAdd(dest_data + index * dest_stride0 + j * dest_stride1,
src_data[i * src_stride0 + j * src_stride1]);
});
return;
}
K2_EVAL2(
context, src_dim0, src_dim1, lambda_add, (int32_t i, int32_t j)->void {
int32_t index = indexes_data[i];
K2_DCHECK_LT(index, dest_dim);
K2_DCHECK_GE(index, 0);
AtomicAdd(dest_data + index * dest_stride0 + j * dest_stride1,
src_data[i * src_stride0 + j * src_stride1]);
});
}
static void IndexAdd1D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one, Tensor *dest) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 1);
K2_CHECK_NE(dest, nullptr);
K2_CHECK_EQ(dest->NumAxes(), 1);
ContextPtr context = GetContext(src, indexes, *dest);
Dtype dtype = src.GetDtype();
const int32_t *indexes_data = indexes.Data();
int32_t src_dim = src.Dim(0);
K2_CHECK_EQ(src_dim, indexes.Dim());
int32_t src_stride = src.Stride(0);
int32_t dest_dim = dest->Dim(0);
int32_t dest_stride = dest->Stride(0);
// atomicAdd is not available for some types, e.g., int8_t and int16_t
// see
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicadd
FOR_REAL_AND_INT32_TYPES(
dtype, T,
IndexAdd1DImpl<T>(context, src.Data<T>(), src_dim, src_stride,
indexes_data, allow_minus_one, dest_dim, dest_stride,
dest->Data<T>()));
}
static void IndexAdd2D(Tensor &src, Array1<int32_t> &indexes,
bool allow_minus_one, Tensor *dest) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_NE(dest, nullptr);
K2_CHECK_EQ(dest->NumAxes(), 2);
K2_CHECK_EQ(dest->Dim(1), src.Dim(1));
ContextPtr context = GetContext(src, indexes, *dest);
Dtype dtype = src.GetDtype();
int32_t src_dim0 = src.Dim(0);
int32_t src_dim1 = src.Dim(1);
K2_CHECK_EQ(src_dim0, indexes.Dim());
int32_t src_stride0 = src.Stride(0);
int32_t src_stride1 = src.Stride(1);
int32_t dest_dim = dest->Dim(0);
int32_t dest_stride0 = dest->Stride(0);
int32_t dest_stride1 = dest->Stride(1);
const int32_t *indexes_data = indexes.Data();
FOR_REAL_AND_INT32_TYPES(
dtype, T,
IndexAdd2DImpl<T>(context, src.Data<T>(), src_dim0, src_dim1, src_stride0,
src_stride1, indexes_data, allow_minus_one, dest_dim,
dest_stride0, dest_stride1, dest->Data<T>()));
}
void IndexAdd(Tensor &src, Array1<int32_t> &indexes, bool allow_minus_one,
Tensor *dest) {
switch (src.NumAxes()) {
case 1:
IndexAdd1D(src, indexes, allow_minus_one, dest);
break;
case 2:
IndexAdd2D(src, indexes, allow_minus_one, dest);
break;
default:
K2_LOG(FATAL) << "Unsupported number of axes: " << src.NumAxes()
<< "\n. Only 1-D and 2-D tensors are supported.";
break;
}
}
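// Illustrative example (hypothetical values): for a 1-D src = [1, 2, 3],
// indexes = [0, 0, 2] and a zero-initialized dest of dim 3, IndexAdd leaves
// dest = [3, 0, 3]; the atomic adds above make repeated indexes safe, and
// with allow_minus_one = true any -1 entries are simply skipped.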
template <typename T>
static void SimpleRaggedIndexSelect1DImpl(ContextPtr context, const T *src_data,
int32_t src_stride, int32_t src_dim,
Ragged<int32_t> &indexes,
int32_t ans_dim, T *ans_data) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(indexes.NumAxes(), 2);
int32_t indexes_dim0 = indexes.Dim0(),
indexes_num_elems = indexes.NumElements();
const int32_t *indexes_row_ids_data = indexes.RowIds(1).Data();
const int32_t *indexes_data = indexes.values.Data();
K2_CHECK_EQ(ans_dim, indexes_dim0);
K2_EVAL(
context, ans_dim, lambda_init_ans,
(int32_t i)->void { ans_data[i] = 0; });
Array1<int32_t> non_zero_indexes(context, ans_dim, -1);
int32_t *non_zero_indexes_data = non_zero_indexes.Data();
K2_EVAL(
context, indexes_num_elems, lambda_set_ans_data, (int32_t i)->void {
int32_t src_index = indexes_data[i];
K2_CHECK_GE(src_index, 0);
K2_CHECK_LT(src_index, src_dim);
T value = src_data[src_index * src_stride];
int32_t ans_index = indexes_row_ids_data[i];
if (value != 0) {
non_zero_indexes_data[ans_index] = i;
ans_data[ans_index] = value;
}
});
// check if there is at most one non-zero element in src for each sub-list
Array1<int32_t> status(context, 1, 0); // 0 -> success; otherwise 1 + row_id
// of bad row in `indexes`
int32_t *status_data = status.Data();
K2_EVAL(
context, indexes_num_elems, lambda_check_status, (int32_t i)->void {
int32_t src_index = indexes_data[i];
T value = src_data[src_index * src_stride];
int32_t ans_index = indexes_row_ids_data[i];
if (value != 0 && non_zero_indexes_data[ans_index] != i)
status_data[0] = 1 + ans_index;
});
int32_t s = status[0];
if (s != 0) {
Array1<T> indexed_values(context, indexes_num_elems);
T *indexed_values_data = indexed_values.Data();
K2_EVAL(
context, indexes_num_elems, lambda_set_values, (int32_t i)->void {
int32_t src_index = indexes_data[i];
indexed_values_data[i] = src_data[src_index * src_stride];
});
Array1<int32_t> row_splits = indexes.RowSplits(1);
K2_LOG(FATAL) << "There must be at most one non-zero "
"element in src for any sub-list in indexes; sub-list "
<< (s - 1) << " has too many elements: "
<< indexed_values.Arange(row_splits[s - 1], row_splits[s]);
}
}
Tensor SimpleRaggedIndexSelect1D(Tensor &src, Ragged<int32_t> &indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 1);
K2_CHECK(IsCompatible(src, indexes));
Dtype dtype = src.GetDtype();
ContextPtr &context = src.Context();
Tensor ans(context, dtype, {indexes.Dim0()});
K2_CHECK(ans.IsContiguous());
int32_t src_stride = src.Stride(0);
int32_t src_dim = src.Dim(0);
int32_t ans_dim = ans.Dim(0);
// Note below src.Data<T> will check if T is compatible with `dtype`.
FOR_ALL_DTYPES(dtype, T,
SimpleRaggedIndexSelect1DImpl<T>(context, src.Data<T>(),
src_stride, src_dim, indexes,
ans_dim, ans.Data<T>()));
return ans;
}
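// Illustrative example for SimpleRaggedIndexSelect1D (hypothetical values):
// with src = [0, 5, 0, 7] and indexes = [ [0, 1], [2, 3] ], each sub-list
// selects the single non-zero entry it covers, giving [5, 7]; a sub-list
// whose entries are all zero yields 0, and a sub-list covering more than one
// non-zero entry triggers the fatal check above.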
template <typename Real>
struct DiscountedCumSumElement {
Real y; // y is a partial sum of x values. Initially it is just a
// single x value. In general each x is multiplied by all
// previous gammas.
Real gamma; // gamma is the product of gammas along a range of elements
};
template <typename Real>
struct CombineCumSumOp {
__device__ DiscountedCumSumElement<Real> operator() (
DiscountedCumSumElement<Real> &a,
DiscountedCumSumElement<Real> &b) const {
return DiscountedCumSumElement<Real>{b.y + b.gamma * a.y,
a.gamma * b.gamma};
}
};
// A stateful callback functor that maintains a running prefix to be applied
// during consecutive scan operations.
template <typename Real>
struct BlockPrefixCallbackOp {
using Elem = DiscountedCumSumElement<Real>;
Elem running_total;
// Constructor
__device__ BlockPrefixCallbackOp(): running_total{0.0, 0.0} { }
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ Elem operator()(Elem block_aggregate) {
Elem old_prefix = running_total;
running_total = CombineCumSumOp<Real>()(running_total, block_aggregate);
return old_prefix;
}
};
/*
Notes for DiscountedCumSum.
It implements a discounted sum along a sequence. Suppose we have x_i, gamma_i and
y_i, for 0 <= i < T. Then we do:
y_0 = x_0
y_i = x_i + y_{i-1} gamma_i
for 0 < i < T. (This is done as a generic inclusive-scan/inclusive-sum with a special
reduction op).
See DiscountedCumSumElement and CombineCumSumOp for how we use a special operator to
do this as an inclusive-sum.
The tensors involved must be 2-dimensional with dimensions (N, T) where N is
the batch size and T the time duration.
Each thread-block is of (x,y,z) size (ThreadsPerBlock,1,1), and it processes one
of the N sequences, i.e. T items. It processes ThreadsPerBlock items at a time; and
if T > ThreadsPerBlock it simply loops to cover the remaining items.
The grid size (x,y,z) is (X,Y,1) where the X and Y together cover the "N"
(batch) dimension. (We can't cover it just in the X dimension because of
limits on the size of each grid dimension).
@param [in] N The batch size, i.e. number of separate sequences. We expect
that N <= gridDim.x * gridDim.y.
@param [in] T The sequence length. There is no constraint on the sequence
length; the kernel deals with ThreadsPerBlock items at a time,
and takes care of T > ThreadsPerBlock by looping.
@param [in] x Pointer to the x input data, which is an array of shape (N,T)
@param [in] x_stride0 Stride along axis 0 of the `x` data
@param [in] gamma Pointer to the gamma input data, which is an array of shape (N,T)
@param [in] gamma_stride0 Stride along axis 0 of the `gamma` data
@param [in] y Pointer to the y output data, which is an array of shape (N,T)
@param [in] y_stride0 Stride along axis 0 of the `y` data
@param [in] stride1 Stride along axis 1 of the three arrays (this is expected
to be identical, nonzero, and preferably -1 or 1).
*/
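/*
  Illustrative worked example (values chosen arbitrarily, not part of the
  notes above): with x = [1, 2, 3] and gamma = [g0, 0.5, 0.5],
    y_0 = 1
    y_1 = 2 + 0.5 * 1   = 2.5
    y_2 = 3 + 0.5 * 2.5 = 4.25
  In terms of CombineCumSumOp, each element starts as {y = x_i, gamma = gamma_i};
  combining the first two gives {2 + 0.5 * 1, g0 * 0.5} and combining that with
  the third gives {3 + 0.5 * 2.5, g0 * 0.25} = {4.25, ...}, matching the
  recurrence (g0 never affects any y because the initial running prefix is 0).
*/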
template <typename Real, int ThreadsPerBlock>
static __global__ void DiscountedCumSumKernel(int N, int T, const Real *x,
int x_stride0, const Real *gamma,
int gamma_stride0, Real *y,
int y_stride0, int stride1) {
int n_idx = blockIdx.y * gridDim.x + blockIdx.x;
if (n_idx >= N)
return;
x += x_stride0 * n_idx;
gamma += gamma_stride0 * n_idx;
y += y_stride0 * n_idx;
int thread_idx = threadIdx.x;
using Elem = DiscountedCumSumElement<Real>;
BlockPrefixCallbackOp<Real> prefix_callback;
typedef cub::BlockScan<Elem, ThreadsPerBlock> BlockScan;
// shared memory for BlockScan
__shared__ typename BlockScan::TempStorage temp_storage;
for (int base_t = 0; base_t < T; base_t += ThreadsPerBlock) {
Elem elem;
// Load x and gamma from memory. These reads will be coalesced (which is
// the advantage of having each thread process one element at this stage;
// although we spend more time with raking reduction than we really
// need to).
if (base_t + thread_idx < T) {
elem.y = x[(base_t + thread_idx) * stride1];
elem.gamma = gamma[(base_t + thread_idx) * stride1];
}
CombineCumSumOp<Real> op;
// The last arg is a callback functor: it receives the aggregate of this
// block and returns the running prefix that is combined into every element of the block.
BlockScan(temp_storage).InclusiveScan(elem, elem, op, prefix_callback);
__syncthreads();
if (base_t + thread_idx < T) y[(base_t + thread_idx) * stride1] = elem.y;
}
}
template <typename Real, int ThreadsPerBlock>
void DiscountedCumSumCudaImpl(cudaStream_t stream,
int N, int T,
const Real *x, int x_stride0,
const Real *gamma, int gamma_stride0,
Real *y, int y_stride0, int stride1) {
int32_t tot_grid_size = N;
int32_t x_grid_size = (tot_grid_size < (1 << 20)
? std::min<int32_t>(tot_grid_size, (1 << 10))
: 32768),
y_grid_size = NumBlocks(tot_grid_size, x_grid_size);
dim3 grid_dim(x_grid_size, y_grid_size, 1),
block_dim(ThreadsPerBlock, 1, 1);
K2_CUDA_SAFE_CALL(
DiscountedCumSumKernel<Real, ThreadsPerBlock>
<<<grid_dim, block_dim, 0, stream>>>(
N, T, x, x_stride0, gamma, gamma_stride0, y, y_stride0, stride1));
}
template <typename Real>
static void DiscountedCumSumCpuImpl(int N, int T,
const Real *x, int x_stride0,
const Real *gamma, int gamma_stride0,
Real *y, int y_stride0,
int stride1) {
for (int32_t n = 0; n < N; n++,
x += x_stride0, gamma += gamma_stride0, y += y_stride0) {
Real cur_sum = 0.0;
for (int32_t t = 0; t < T; t++) {
cur_sum = x[t * stride1] + cur_sum * gamma[t * stride1];
y[t * stride1] = cur_sum;
}
}
}
void DiscountedCumSum(const Tensor &src, const Tensor &gamma, Tensor *dest) {
// check contexts compatible:
if (!(IsCompatible(src, gamma) && IsCompatible(src, *dest))) {
K2_LOG(FATAL) << "Tensors are on different devices";
}
if (!(src.NumAxes() == 2 && gamma.NumAxes() == 2 && dest->NumAxes() == 2)) {
K2_LOG(FATAL) << "Expected all num-axes to equal 2.";
}
if (!(src.SameDims(gamma) && src.SameDims(*dest))) {
K2_LOG(FATAL) << "Expected all args to have the same dim.";
}
if (!(src.Stride(1) == gamma.Stride(1) && src.Stride(1) == dest->Stride(1))) {
K2_LOG(FATAL) << "Expected all strides on dim 1 to be the same.";
}
if (!(src.GetDtype() == gamma.GetDtype() &&
src.GetDtype() == dest->GetDtype())) {
K2_LOG(FATAL) << "Expected all args to have the same dtype.";
}
int32_t N = src.Dim(0),
T = src.Dim(1),
src_stride0 = src.Stride(0),
gamma_stride0 = gamma.Stride(0),
dest_stride0 = dest->Stride(0),
stride1 = src.Stride(1); // these are all the same.
ContextPtr c = src.Context();
if (src.GetDtype() == kFloatDtype) {
if (c->GetDeviceType() == kCuda) {
DiscountedCumSumCudaImpl<float, 128>(c->GetCudaStream(), N, T,
src.Data<float>(), src_stride0,
gamma.Data<float>(), gamma_stride0,
dest->Data<float>(), dest_stride0,
stride1);
} else {
DiscountedCumSumCpuImpl<float>(N, T,
src.Data<float>(), src_stride0,
gamma.Data<float>(), gamma_stride0,
dest->Data<float>(), dest_stride0,
stride1);
}
} else if (src.GetDtype() == kDoubleDtype) {
if (c->GetDeviceType() == kCuda) {
DiscountedCumSumCudaImpl<double, 128>(c->GetCudaStream(), N, T,
src.Data<double>(), src_stride0,
gamma.Data<double>(), gamma_stride0,
dest->Data<double>(), dest_stride0,
stride1);
} else {
DiscountedCumSumCpuImpl<double>(N, T,
src.Data<double>(), src_stride0,
gamma.Data<double>(), gamma_stride0,
dest->Data<double>(), dest_stride0,
stride1);
}
} else {
K2_LOG(FATAL)
<< "This algorithm only instantiated for float and double; type is "
<< TraitsOf(src.GetDtype()).Name();
}
}
Tensor Flip(Tensor &src, int32_t axis) {
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(axis, -num_axes);
K2_CHECK_LT(axis, num_axes);
if (axis < 0)
axis += num_axes;
int32_t old_dim = src.Dim(axis);
if (old_dim <= 1)
return src; // No point copying it, it's a no-op.
TensorImplPtr src_impl = src.Impl(),
ans_impl = std::make_shared<TensorImpl>(*src_impl);
int32_t old_stride = ans_impl->shape.Stride(axis);
ans_impl->shape.SetStride(axis, -old_stride);
int64_t byte_offset = old_stride * static_cast<int64_t>(old_dim - 1) *
TraitsOf(ans_impl->dtype).NumBytes();
ans_impl->byte_offset += byte_offset;
return Tensor(ans_impl);
}
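// Usage sketch for Flip (assumed sizes): flipping axis 0 of a contiguous 1-D
// tensor with dim 5 and stride 1 returns a view whose stride is -1 and whose
// byte_offset has been advanced by 4 elements, so index 0 of the view reads
// the last element of the original storage; no data is copied.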
} // namespace k2
|
ab9ef387cfa5391b9dd0a71a698115ff7bcaa68f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N (1024*2048)
#define THREADS_PER_BLOCK 37
__global__ void add(int *a, int *b, int *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index<n)
c[index] = a[index] + b[index];
}
int main(void) {
int NrBlocks = N/THREADS_PER_BLOCK;
if (N % THREADS_PER_BLOCK !=0)
NrBlocks+=1;
printf("Launching kernel with:\n");
printf("\t NrBlocks: %d\n", NrBlocks);
printf("\t THREADS_PER_BLOCK: %d\n", THREADS_PER_BLOCK);
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
printf("Preparing vectors of size %d\n", N);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
for (int i=0; i<N; i++)
{
a[i] = 1;
b[i] = 4;
c[i] = 0;
}
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU with several
// blocks and THREADS_PER_BLOCK many threads
// per block
hipLaunchKernelGGL(( add), dim3(NrBlocks),dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c, N);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
printf("\nFirst 10 elements of resulting vector are:\n");
for (int i=0; i<10; i++)
{
printf("%d ", c[i]);
}
printf("\nLast 10 elements of resulting vector are:\n");
for (int i=N-10; i<N; i++)
{
printf("%d ", c[i]);
}
printf("\n");
// Cleanup
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| ab9ef387cfa5391b9dd0a71a698115ff7bcaa68f.cu | #include <stdio.h>
#define N (1024*2048)
#define THREADS_PER_BLOCK 37
__global__ void add(int *a, int *b, int *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index<n)
c[index] = a[index] + b[index];
}
int main(void) {
int NrBlocks = N/THREADS_PER_BLOCK;
if (N % THREADS_PER_BLOCK !=0)
NrBlocks+=1;
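// Equivalent one-line form of this round-up division (illustrative only):
// NrBlocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;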
printf("Launching kernel with:\n");
printf("\t NrBlocks: %d\n", NrBlocks);
printf("\t THREADS_PER_BLOCK: %d\n", THREADS_PER_BLOCK);
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
printf("Preparing vectors of size %d\n", N);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
for (int i=0; i<N; i++)
{
a[i] = 1;
b[i] = 4;
c[i] = 0;
}
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU with several
// blocks and THREADS_PER_BLOCK many threads
// per block
add<<<NrBlocks,THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
printf("\nFirst 10 elements of resulting vector are:\n");
for (int i=0; i<10; i++)
{
printf("%d ", c[i]);
}
printf("\nLast 10 elements of resulting vector are:\n");
for (int i=N-10; i<N; i++)
{
printf("%d ", c[i]);
}
printf("\n");
// Cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
81d081ac76c2221848343ce21364594dfbf6d68d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include <float.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
__global__
void histogram_kernel(unsigned int pass,
unsigned int * d_bins,
unsigned int* const d_input,
const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
unsigned int one = 1;
int bin = ((d_input[mid] & (one<<pass)) == (one<<pass)) ? 1 : 0;
if(bin)
atomicAdd(&d_bins[1], 1);
else
atomicAdd(&d_bins[0], 1);
}
// Run a single exclusive scan of the 1-bits; when moving elements, a 0-bit element at position mid takes (mid - scan[mid]) as its relative offset
__global__
void exclusive_scan_kernel(unsigned int pass,
unsigned int const * d_inputVals,
unsigned int * d_output,
const int size,
unsigned int base,
unsigned int threadSize) {
int mid = threadIdx.x + threadSize * base;
int block = threadSize*base;
unsigned int one = 1;
if(mid >= size)
return;
unsigned int val = 0;
if(mid > 0)
val = ((d_inputVals[mid-1] & (one<<pass)) == (one<<pass)) ? 1 : 0;
else
val = 0;
d_output[mid] = val;
__syncthreads();
for(int s = 1; s <= threadSize; s *= 2) {
int spot = mid - s;
if(spot >= 0 && spot >= threadSize*base)
val = d_output[spot];
__syncthreads();
if(spot >= 0 && spot >= threadSize*base)
d_output[mid] += val;
__syncthreads();
}
if(base > 0)
d_output[mid] += d_output[base*threadSize - 1];
}
__global__
void test_kernel(unsigned int pass,
unsigned int * d_output,
size_t numElems)
{
int mid = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int one=1;
unsigned int val = (unsigned int)mid;
if(mid < numElems) {
d_output[mid] = (val & (one<<pass)) == (one<<pass) ? 1 : 0;
}
}
void test(unsigned int pass) {
int numtest = 24;
unsigned int* d_out;
checkCudaErrors(hipMalloc(&d_out, sizeof(unsigned int)*numtest));
unsigned int h_out[numtest];
hipLaunchKernelGGL(( test_kernel), dim3(dim3(1)), dim3(dim3(numtest)), 0, 0, pass, d_out, numtest);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&h_out, d_out, numtest*sizeof(unsigned int), hipMemcpyDeviceToHost));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
for(int i = 0; i< numtest; i++) {
printf("%d: %d, ", i, h_out[i]);
}
printf("\n");
checkCudaErrors(hipFree(d_out));
}
__global__
void move_kernel(
unsigned int pass,
unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* d_outputMove,
unsigned int* const d_scanned,
unsigned int one_pos,
const size_t numElems) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= numElems)
return;
unsigned int scan=0;
unsigned int base=0;
unsigned int one= 1;
if( ( d_inputVals[mid] & (one<<pass)) == (one<<pass)) {
scan = d_scanned[mid];
base = one_pos;
} else {
scan = (mid) - d_scanned[mid];
base = 0;
}
d_outputMove[mid] = base+scan;
d_outputPos[base+scan] = d_inputPos[mid];//d_inputPos[0];
d_outputVals[base+scan] = d_inputVals[mid];//base+scan;//d_inputVals[0];
}
int debug = 1;
void debug_device_array(char* name, int l, unsigned int * d_arr, int numElems) {
if(!debug)
return;
unsigned int h_arr[l];
checkCudaErrors(hipMemcpy(&h_arr, d_arr, l*sizeof(unsigned int), hipMemcpyDeviceToHost));
printf("%s", name);
printf(" ");
for(int i=0; i < l; i++) {
printf("%d ", h_arr[i]);
}
printf("\n");
unsigned int max = 0;
unsigned int min = 1000000;
unsigned int h_arr2[numElems];
checkCudaErrors(hipMemcpy(&h_arr2, d_arr, numElems*sizeof(unsigned int), hipMemcpyDeviceToHost));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
for(int i = 0; i < numElems; i++) {
if(h_arr2[i] < min)
min = h_arr2[i];
if(h_arr2[i] > max)
max = h_arr2[i];
}
printf("max %d min %d\n", max, min);
}
void verify_scan(unsigned int * d_arr, unsigned int * d_scanned, int numElems, int pass) {
unsigned int h_arr[3000];
unsigned int one =1;
unsigned int h_scanned[3000];
checkCudaErrors(hipMemcpy(&h_arr, d_arr, 3000*sizeof(unsigned int), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&h_scanned, d_scanned, 3000*sizeof(unsigned int), hipMemcpyDeviceToHost));
unsigned int acc = 0;
for(int i = 0; i < 3000; i++) {
if(acc != h_scanned[i]) {
printf("wrong at %d %d != %d\n", i, acc, h_scanned[i]);
}
acc+= ((h_arr[i] & (one<<pass)) == (one<<pass)) ? 1 : 0;
}
}
/*
void verify_sort(unsigned int * d_sorted, int numElems, int pass) {
unsigned int h_sorted[3000];
checkCudaErrors(hipMemcpy(&h_sorted, d_sorted, 3000*sizeof(unsigned int), hipMemcpyDeviceToHost));
unsigned int last = h_scanned[0];
for(int i = 1; i < 3000; i++) {
if(acc != h_scanned[i]) {
printf("wrong at %d %d != %d\n", i, acc, h_scanned[i]);
}
acc+= ((h_arr[i] & (1<<pass) == (1<<pass)) ? 1 : 0);
}
}*/
// Ceiling-division helper adapted from an online reference; returns ceil(n/d) + 1, a deliberately generous block count
int get_max_size(int n, int d) {
return (int)ceil( (float)n/(float)d ) + 1;
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
/*for(unsigned int i = 0; i < 32; i++) {
printf("for testing %d \n", i);
test(i);
}*/
//return;
unsigned int* d_bins;
unsigned int h_bins[2];
unsigned int* d_scanned;
unsigned int* d_moved;
const size_t histo_size = 2*sizeof(unsigned int);
const size_t arr_size = numElems*sizeof(unsigned int);
checkCudaErrors(hipMalloc(&d_bins, histo_size));
checkCudaErrors(hipMalloc(&d_scanned, arr_size));
checkCudaErrors(hipMalloc(&d_moved, arr_size));
// just keep thread dim at 1024
dim3 thread_dim(1024 );
dim3 hist_block_dim(get_max_size(numElems, thread_dim.x));
// get number of elements
//printf("numElems %d\n", numElems);
debug_device_array("input", 100, d_inputVals, numElems);
for(unsigned int pass = 0; pass < 32; pass++) {
unsigned int one = 1;
/*if((one<<pass) <= 0) {
printf("breaking at pass %d ", pass);
break;
}*/
checkCudaErrors(hipMemset(d_bins, 0, histo_size));
checkCudaErrors(hipMemset(d_scanned, 0, arr_size));
checkCudaErrors(hipMemset(d_outputVals, 0, arr_size));
checkCudaErrors(hipMemset(d_outputPos, 0, arr_size));
hipLaunchKernelGGL(( histogram_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0, pass, d_bins, d_inputVals, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// copy the histogram data to host
checkCudaErrors(hipMemcpy(&h_bins, d_bins, histo_size, hipMemcpyDeviceToHost));
printf("hey guys %d %d %d %d %d \n", h_bins[0], h_bins[1], h_bins[0]+h_bins[1], numElems, (one<<pass));
// now we have the digit counts: the 0s start at position 0 and the 1s start at h_bins[0]
// compute the exclusive scan of the 1-bits
for(int i = 0; i < get_max_size(numElems, thread_dim.x); i++) {
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(dim3(1)), dim3(thread_dim), 0, 0,
pass,
d_inputVals,
d_scanned,
numElems,
i,
thread_dim.x
);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//printf("made it past scanned\n");
//debug_device_array("input", 100, d_inputVals, numElems);
//debug_device_array("scanned", 100, d_scanned, numElems);
//verify_scan(d_inputVals, d_scanned, numElems, pass);
// calculate the move positions
hipLaunchKernelGGL(( move_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0,
pass,
d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos,
d_moved,
d_scanned,
h_bins[0],
numElems
);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//debug_device_array("move", 100, d_moved, numElems);
//debug_device_array("output vals ", 100, d_outputVals, numElems);
//debug_device_array("output pos ", 100, d_outputPos, numElems);
// printf("made it past move calculation \n");
//finally,
// copy the sorted values and positions back into the input buffers for the next pass
checkCudaErrors(hipMemcpy(d_inputVals, d_outputVals, arr_size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_inputPos, d_outputPos, arr_size, hipMemcpyDeviceToDevice));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//printf("Made to end\n");
// debug_device_array("output vals ", 100000, d_outputVals, numElems);
// debug_device_array("output pos ", 100, d_outputPos, numElems);
checkCudaErrors(hipFree(d_moved));
checkCudaErrors(hipFree(d_scanned));
checkCudaErrors(hipFree(d_bins));
}
| 81d081ac76c2221848343ce21364594dfbf6d68d.cu | //Udacity HW 4
//Radix Sorting
#include <float.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
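// A minimal host-side sketch of one LSB pass (illustrative only; the helper
// name and its stable two-pointer placement are editorial, not part of the
// assignment interface). For the example [0 0 1 1 0 0 1] above: the histogram
// is {4 zeros, 3 ones}, the exclusive scan of the 1-flags is [0 0 0 1 2 2 2],
// and an element with digit 0 at position i lands at i - scan[i], while an
// element with digit 1 lands at numZeros + scan[i].
static void reference_radix_pass_host(const unsigned int* in, unsigned int* out,
                                      int n, unsigned int pass) {
  int numZeros = 0;
  for (int i = 0; i < n; ++i)
    if (((in[i] >> pass) & 1u) == 0u) ++numZeros;
  int zeroPos = 0, onePos = numZeros;
  for (int i = 0; i < n; ++i) {
    if (((in[i] >> pass) & 1u) == 0u)
      out[zeroPos++] = in[i];
    else
      out[onePos++] = in[i];
  }
}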
__global__
void histogram_kernel(unsigned int pass,
unsigned int * d_bins,
unsigned int* const d_input,
const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
unsigned int one = 1;
int bin = ((d_input[mid] & (one<<pass)) == (one<<pass)) ? 1 : 0;
if(bin)
atomicAdd(&d_bins[1], 1);
else
atomicAdd(&d_bins[0], 1);
}
// Run a single exclusive scan of the 1-bits; when moving elements, a 0-bit element at position mid takes (mid - scan[mid]) as its relative offset
__global__
void exclusive_scan_kernel(unsigned int pass,
unsigned int const * d_inputVals,
unsigned int * d_output,
const int size,
unsigned int base,
unsigned int threadSize) {
int mid = threadIdx.x + threadSize * base;
int block = threadSize*base;
unsigned int one = 1;
if(mid >= size)
return;
unsigned int val = 0;
if(mid > 0)
val = ((d_inputVals[mid-1] & (one<<pass)) == (one<<pass)) ? 1 : 0;
else
val = 0;
d_output[mid] = val;
__syncthreads();
for(int s = 1; s <= threadSize; s *= 2) {
int spot = mid - s;
if(spot >= 0 && spot >= threadSize*base)
val = d_output[spot];
__syncthreads();
if(spot >= 0 && spot >= threadSize*base)
d_output[mid] += val;
__syncthreads();
}
if(base > 0)
d_output[mid] += d_output[base*threadSize - 1];
}
__global__
void test_kernel(unsigned int pass,
unsigned int * d_output,
size_t numElems)
{
int mid = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int one=1;
unsigned int val = (unsigned int)mid;
if(mid < numElems) {
d_output[mid] = (val & (one<<pass)) == (one<<pass) ? 1 : 0;
}
}
void test(unsigned int pass) {
int numtest = 24;
unsigned int* d_out;
checkCudaErrors(cudaMalloc(&d_out, sizeof(unsigned int)*numtest));
unsigned int h_out[numtest];
test_kernel<<<dim3(1), dim3(numtest)>>>(pass, d_out, numtest);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(&h_out, d_out, numtest*sizeof(unsigned int), cudaMemcpyDeviceToHost));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
for(int i = 0; i< numtest; i++) {
printf("%d: %d, ", i, h_out[i]);
}
printf("\n");
checkCudaErrors(cudaFree(d_out));
}
__global__
void move_kernel(
unsigned int pass,
unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* d_outputMove,
unsigned int* const d_scanned,
unsigned int one_pos,
const size_t numElems) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= numElems)
return;
unsigned int scan=0;
unsigned int base=0;
unsigned int one= 1;
if( ( d_inputVals[mid] & (one<<pass)) == (one<<pass)) {
scan = d_scanned[mid];
base = one_pos;
} else {
scan = (mid) - d_scanned[mid];
base = 0;
}
d_outputMove[mid] = base+scan;
d_outputPos[base+scan] = d_inputPos[mid];//d_inputPos[0];
d_outputVals[base+scan] = d_inputVals[mid];//base+scan;//d_inputVals[0];
}
int debug = 1;
void debug_device_array(char* name, int l, unsigned int * d_arr, int numElems) {
if(!debug)
return;
unsigned int h_arr[l];
checkCudaErrors(cudaMemcpy(&h_arr, d_arr, l*sizeof(unsigned int), cudaMemcpyDeviceToHost));
printf("%s", name);
printf(" ");
for(int i=0; i < l; i++) {
printf("%d ", h_arr[i]);
}
printf("\n");
unsigned int max = 0;
unsigned int min = 1000000;
unsigned int h_arr2[numElems];
checkCudaErrors(cudaMemcpy(&h_arr2, d_arr, numElems*sizeof(unsigned int), cudaMemcpyDeviceToHost));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
for(int i = 0; i < numElems; i++) {
if(h_arr2[i] < min)
min = h_arr2[i];
if(h_arr2[i] > max)
max = h_arr2[i];
}
printf("max %d min %d\n", max, min);
}
void verify_scan(unsigned int * d_arr, unsigned int * d_scanned, int numElems, int pass) {
unsigned int h_arr[3000];
unsigned int one =1;
unsigned int h_scanned[3000];
checkCudaErrors(cudaMemcpy(&h_arr, d_arr, 3000*sizeof(unsigned int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&h_scanned, d_scanned, 3000*sizeof(unsigned int), cudaMemcpyDeviceToHost));
unsigned int acc = 0;
for(int i = 0; i < 3000; i++) {
if(acc != h_scanned[i]) {
printf("wrong at %d %d != %d\n", i, acc, h_scanned[i]);
}
acc+= ((h_arr[i] & (one<<pass)) == (one<<pass)) ? 1 : 0;
}
}
/*
void verify_sort(unsigned int * d_sorted, int numElems, int pass) {
unsigned int h_sorted[3000];
checkCudaErrors(cudaMemcpy(&h_sorted, d_sorted, 3000*sizeof(unsigned int), cudaMemcpyDeviceToHost));
unsigned int last = h_scanned[0];
for(int i = 1; i < 3000; i++) {
if(acc != h_scanned[i]) {
printf("wrong at %d %d != %d\n", i, acc, h_scanned[i]);
}
acc+= ((h_arr[i] & (1<<pass) == (1<<pass)) ? 1 : 0);
}
}*/
// Ceiling-division helper adapted from an online reference; returns ceil(n/d) + 1, a deliberately generous block count
int get_max_size(int n, int d) {
return (int)ceil( (float)n/(float)d ) + 1;
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
/*for(unsigned int i = 0; i < 32; i++) {
printf("for testing %d \n", i);
test(i);
}*/
//return;
unsigned int* d_bins;
unsigned int h_bins[2];
unsigned int* d_scanned;
unsigned int* d_moved;
const size_t histo_size = 2*sizeof(unsigned int);
const size_t arr_size = numElems*sizeof(unsigned int);
checkCudaErrors(cudaMalloc(&d_bins, histo_size));
checkCudaErrors(cudaMalloc(&d_scanned, arr_size));
checkCudaErrors(cudaMalloc(&d_moved, arr_size));
// just keep thread dim at 1024
dim3 thread_dim(1024 );
dim3 hist_block_dim(get_max_size(numElems, thread_dim.x));
// get number of elements
//printf("numElems %d\n", numElems);
debug_device_array("input", 100, d_inputVals, numElems);
for(unsigned int pass = 0; pass < 32; pass++) {
unsigned int one = 1;
/*if((one<<pass) <= 0) {
printf("breaking at pass %d ", pass);
break;
}*/
checkCudaErrors(cudaMemset(d_bins, 0, histo_size));
checkCudaErrors(cudaMemset(d_scanned, 0, arr_size));
checkCudaErrors(cudaMemset(d_outputVals, 0, arr_size));
checkCudaErrors(cudaMemset(d_outputPos, 0, arr_size));
histogram_kernel<<<hist_block_dim, thread_dim>>>(pass, d_bins, d_inputVals, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// copy the histogram data to host
checkCudaErrors(cudaMemcpy(&h_bins, d_bins, histo_size, cudaMemcpyDeviceToHost));
printf("hey guys %d %d %d %d %d \n", h_bins[0], h_bins[1], h_bins[0]+h_bins[1], numElems, (one<<pass));
// now we have the digit counts: the 0s start at position 0 and the 1s start at h_bins[0]
// compute the exclusive scan of the 1-bits
for(int i = 0; i < get_max_size(numElems, thread_dim.x); i++) {
exclusive_scan_kernel<<<dim3(1), thread_dim>>>(
pass,
d_inputVals,
d_scanned,
numElems,
i,
thread_dim.x
);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//printf("made it past scanned\n");
//debug_device_array("input", 100, d_inputVals, numElems);
//debug_device_array("scanned", 100, d_scanned, numElems);
//verify_scan(d_inputVals, d_scanned, numElems, pass);
// calculate the move positions
move_kernel<<<hist_block_dim, thread_dim>>>(
pass,
d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos,
d_moved,
d_scanned,
h_bins[0],
numElems
);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//debug_device_array("move", 100, d_moved, numElems);
//debug_device_array("output vals ", 100, d_outputVals, numElems);
//debug_device_array("output pos ", 100, d_outputPos, numElems);
// printf("made it past move calculation \n");
//finally,
// copy the sorted values and positions back into the input buffers for the next pass
checkCudaErrors(cudaMemcpy(d_inputVals, d_outputVals, arr_size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_inputPos, d_outputPos, arr_size, cudaMemcpyDeviceToDevice));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//printf("Made to end\n");
// debug_device_array("output vals ", 100000, d_outputVals, numElems);
// debug_device_array("output pos ", 100, d_outputPos, numElems);
checkCudaErrors(cudaFree(d_moved));
checkCudaErrors(cudaFree(d_scanned));
checkCudaErrors(cudaFree(d_bins));
}
|
68f34873d9a87e4f46f241d75653573e4cfe77d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Written by:
 * Iwona Kotlarska, Łukasz Kondraciuk
* University of Warsaw
* 2019 - port to CUDA for SC19 student cluster competition
*
*/
#include <hip/hip_runtime.h>
#include "rho_p.h"
#include "utils.h"
__global__ void rho_p_kernel(const particle_t* p,
const float q_8V,
const int np,
const int sy,
const int sz,
field_t* f) {
float w0, w1, w2, w3, w4, w5, w6, w7, dz;
int v;
int n = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (; n < np; n += stride) {
// After detailed experiments and studying of assembly dumps, it was
// determined that if the platform does not support efficient 4-vector
// SIMD memory gather/scatter operations, the savings from using
// "trilinear" are slightly outweighed by the overhead of the
// gather/scatters.
// Load the particle data
w0 = p[n].dx;
w1 = p[n].dy;
dz = p[n].dz;
v = p[n].i;
w7 = p[n].w * q_8V;
// Compute the trilinear weights
// Though the PPE should have hardware fma/fmaf support, it was
// measured to be more efficient _not_ to use it here. (Maybe the
// compiler isn't actually generating the assembly for it.)
#define FMA(x, y, z) ((z) + (x) * (y))
#define FNMS(x, y, z) ((z) - (x) * (y))
w6 = FNMS(w0, w7, w7); // q(1-dx)
w7 = FMA(w0, w7, w7); // q(1+dx)
w4 = FNMS(w1, w6, w6);
w5 = FNMS(w1, w7, w7); // q(1-dx)(1-dy), q(1+dx)(1-dy)
w6 = FMA(w1, w6, w6);
w7 = FMA(w1, w7, w7); // q(1-dx)(1+dy), q(1+dx)(1+dy)
w0 = FNMS(dz, w4, w4);
w1 = FNMS(dz, w5, w5);
w2 = FNMS(dz, w6, w6);
w3 = FNMS(dz, w7, w7);
w4 = FMA(dz, w4, w4);
w5 = FMA(dz, w5, w5);
w6 = FMA(dz, w6, w6);
w7 = FMA(dz, w7, w7);
#undef FNMS
#undef FMA
// Reduce the particle charge to rhof
atomicAdd(&f[v].rhof, w0);
atomicAdd(&f[v + 1].rhof, w1);
atomicAdd(&f[v + sy].rhof, w2);
atomicAdd(&f[v + sy + 1].rhof, w3);
atomicAdd(&f[v + sz].rhof, w4);
atomicAdd(&f[v + sz + 1].rhof, w5);
atomicAdd(&f[v + sz + sy].rhof, w6);
atomicAdd(&f[v + sz + sy + 1].rhof, w7);
}
}
void rho_p_cuda(const particle_t* p,
const float q_8V,
const int np,
const int sy,
const int sz,
field_t* f) {
hipLaunchKernelGGL(( rho_p_kernel), dim3(1024), dim3(1024), 0, 0, p, q_8V, np, sy, sz, f);
}
| 68f34873d9a87e4f46f241d75653573e4cfe77d9.cu | /*
* Written by:
* Iwona Kotlarska, Łukasz Kondraciuk
* University of Warsaw
* 2019 - port to CUDA for SC19 student cluster competition
*
*/
#include <cuda_runtime.h>
#include "rho_p.h"
#include "utils.h"
__global__ void rho_p_kernel(const particle_t* p,
const float q_8V,
const int np,
const int sy,
const int sz,
field_t* f) {
float w0, w1, w2, w3, w4, w5, w6, w7, dz;
int v;
int n = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (; n < np; n += stride) {
// After detailed experiments and studying of assembly dumps, it was
// determined that if the platform does not support efficient 4-vector
// SIMD memory gather/scatter operations, the savings from using
// "trilinear" are slightly outweighed by the overhead of the
// gather/scatters.
// Load the particle data
w0 = p[n].dx;
w1 = p[n].dy;
dz = p[n].dz;
v = p[n].i;
w7 = p[n].w * q_8V;
// Compute the trilinear weights
// Though the PPE should have hardware fma/fmaf support, it was
// measured to be more efficient _not_ to use it here. (Maybe the
// compiler isn't actually generating the assembly for it.)
#define FMA(x, y, z) ((z) + (x) * (y))
#define FNMS(x, y, z) ((z) - (x) * (y))
w6 = FNMS(w0, w7, w7); // q(1-dx)
w7 = FMA(w0, w7, w7); // q(1+dx)
w4 = FNMS(w1, w6, w6);
w5 = FNMS(w1, w7, w7); // q(1-dx)(1-dy), q(1+dx)(1-dy)
w6 = FMA(w1, w6, w6);
w7 = FMA(w1, w7, w7); // q(1-dx)(1+dy), q(1+dx)(1+dy)
w0 = FNMS(dz, w4, w4);
w1 = FNMS(dz, w5, w5);
w2 = FNMS(dz, w6, w6);
w3 = FNMS(dz, w7, w7);
w4 = FMA(dz, w4, w4);
w5 = FMA(dz, w5, w5);
w6 = FMA(dz, w6, w6);
w7 = FMA(dz, w7, w7);
#undef FNMS
#undef FMA
// Reduce the particle charge to rhof
atomicAdd(&f[v].rhof, w0);
atomicAdd(&f[v + 1].rhof, w1);
atomicAdd(&f[v + sy].rhof, w2);
atomicAdd(&f[v + sy + 1].rhof, w3);
atomicAdd(&f[v + sz].rhof, w4);
atomicAdd(&f[v + sz + 1].rhof, w5);
atomicAdd(&f[v + sz + sy].rhof, w6);
atomicAdd(&f[v + sz + sy + 1].rhof, w7);
}
}
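// Sketch of the deposit performed above: each particle scatters w * q_8V
// (q_8V presumably being q/(8V)) to the 8 corners of its cell with trilinear
// factors, e.g. f[v].rhof accumulates w * q_8V * (1-dx)(1-dy)(1-dz) and
// f[v+sz+sy+1].rhof accumulates w * q_8V * (1+dx)(1+dy)(1+dz). atomicAdd is
// required because many particles may update the same grid node concurrently;
// the fixed <<<1024, 1024>>> launch below relies on the kernel's grid-stride
// loop to cover any np.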
void rho_p_cuda(const particle_t* p,
const float q_8V,
const int np,
const int sy,
const int sz,
field_t* f) {
rho_p_kernel<<<1024, 1024>>>(p, q_8V, np, sy, sz, f);
}
|
2b9c6b2c035ff12b02e352cd6434ade69475fd88.hip | // !!! This is a file automatically generated by hipify!!!
//
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include "h_code/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "h_code/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RK_45(double state[], double step, int n, InitBody *bs, Joint *js,double Y[]);
// Functions found in Functs.cu
void pend_init(InitBody *bs,int n,double mass, double length);
void horizontal_drop(double x[],int n);
void set_up(double A[], double B[], double C[], int n , double h);
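// The state arrays passed to RK_45 are assumed to hold 2*n entries
// (presumably the n generalized coordinates followed by their n rates),
// which is why inits and Y below are allocated with length 2*n.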
//Main function
int main()
{
int n=0;
std::ofstream timedata;
std::ofstream numbods;
numbods.open("numbods.mtx");
timedata.open("graph_gpu.mtx");
while(n<80000)
{
if(n<500)
{
n+=10;
}
else if( n<2000)
{
n+=100;
}
else if(n< 10000)
{
n+= 1000;
}
else
{
n+=10000;
}
//Variable Declarations
InitBody* bodies; //List of bodies used for initialization only
Joint* joints; //List of joints between bodies NOTE: This joint list is not used in this version
double *inits; //Initial conditions
double *Y; //Solution to each timestep
//std::ofstream myfile;
//std::ofstream myfile2;
//myfile2.open("Vals.mtx");
//myfile.open ("output.mtx");
//System Setup
bodies = new InitBody[n]; //List of initialization bodies is length n
joints = new Joint[n]; //List of joints is length n
inits = new double[2*n]; //Initial conditions are length 2*n
Y = new double[2*n]; //Timestep solution is length 2*n
pend_init(bodies,n,1.0,1.0); //Initialize mass, length, and inertia of all bodies
//Time Setup
double tstep= 0.001; //Length of a timestep [s]
double tfinal =0.005; //Final time [s]
int tlen = (int) floor(tfinal/tstep)+1; //Number of timesteps
//Matrix Output Setup
//int shape1[2] = { tlen , 2*n }, fortran_order = 0; //Shape of solution matrix
//int shape2[2] = { 2 , n+1 }; //Shape of matrix holding information to calculate the energy
//double Vals[2][n+1]; //Matrix holding information to calculate the energy
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::digits10);
//myfile2<<tstep<<" ";
//Vals[0][0]=tstep; //Save the length of a timestep for plotting purposes
//Vals[1][0]=tfinal; //Save the final time for plotting purposes
hipEvent_t beginEvent;
hipEvent_t endEvent;
hipEventCreate( &beginEvent );
hipEventCreate( &endEvent );
//System Initialization
horizontal_drop(inits,n); //Set the initial conditions
//Save the initial conditions in the solution matrix
//myfile << "\n";
hipEventRecord( beginEvent, 0 );
//Numerical Integration
for(int t=1; t<tlen; t++) //Loop through every timestep
{
RK_45(inits,tstep,n,bodies,joints,Y); //Find the solution at that timestep
for(int i = 0; i<2*n;i++) //Loop through the solution
{
inits[i]=Y[i]; //Use the solution as the initial conditions for the next timestep
//myfile << inits[i]<<" ";
}
//myfile << "\n";
}
hipEventRecord( endEvent, 0 );
hipEventSynchronize( endEvent );
float timeValue;
hipEventElapsedTime( &timeValue, beginEvent, endEvent );
timedata<< timeValue << " ";
numbods<<n<<" ";
if ( hipSuccess != hipGetLastError() )
printf( "Error!\n" );
std::cout << n << std::endl;
//Solution Output
//npy_save_double("Vals.npy",fortran_order,2,shape2,&Vals[0][0]); //Output values to find energy
//Free memory
delete[] inits;
delete[] Y;
delete[] bodies;
delete[] joints;
//myfile.close();
//myfile2.close();
//std::cout<<n<<std::endl;
}
numbods.close();
timedata.close();
return EXIT_SUCCESS; //Program completed successfully
}
| 2b9c6b2c035ff12b02e352cd6434ade69475fd88.cu | //
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <cuda.h>
#include <iostream>
#include <math.h>
#include "h_code/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "h_code/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RK_45(double state[], double step, int n, InitBody *bs, Joint *js,double Y[]);
// Functions found in Functs.cu
void pend_init(InitBody *bs,int n,double mass, double length);
void horizontal_drop(double x[],int n);
void set_up(double A[], double B[], double C[], int n , double h);
//Main function
int main()
{
int n=0;
std::ofstream timedata;
std::ofstream numbods;
numbods.open("numbods.mtx");
timedata.open("graph_gpu.mtx");
while(n<80000)
{
if(n<500)
{
n+=10;
}
else if( n<2000)
{
n+=100;
}
else if(n< 10000)
{
n+= 1000;
}
else
{
n+=10000;
}
//Variable Declarations
InitBody* bodies; //List of bodies used for initialization only
Joint* joints; //List of joints between bodies NOTE: This joint list is not used in this version
double *inits; //Initial conditions
double *Y; //Solution to each timestep
//std::ofstream myfile;
//std::ofstream myfile2;
//myfile2.open("Vals.mtx");
//myfile.open ("output.mtx");
//System Setup
bodies = new InitBody[n]; //List of initialization bodies is length n
joints = new Joint[n]; //List of joints is length n
inits = new double[2*n]; //Initial conditions are length 2*n
Y = new double[2*n]; //Timestep solution is length 2*n
pend_init(bodies,n,1.0,1.0); //Initialize mass, length, and inertia of all bodies
//Time Setup
double tstep= 0.001; //Length of a timestep [s]
double tfinal =0.005; //Final time [s]
int tlen = (int) floor(tfinal/tstep)+1; //Number of timesteps
//Matrix Output Setup
//int shape1[2] = { tlen , 2*n }, fortran_order = 0; //Shape of solution matrix
//int shape2[2] = { 2 , n+1 }; //Shape of matrix holding information to calculate the energy
//double Vals[2][n+1]; //Matrix holding information to calculate the energy
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::digits10);
//myfile2<<tstep<<" ";
//Vals[0][0]=tstep; //Save the length of a timestep for plotting purposes
//Vals[1][0]=tfinal; //Save the final time for plotting purposes
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate( &beginEvent );
cudaEventCreate( &endEvent );
//System Initialization
horizontal_drop(inits,n); //Set the initial conditions
//Save the initial conditions in the solution matrix
//myfile << "\n";
cudaEventRecord( beginEvent, 0 );
//Numerical Integration
for(int t=1; t<tlen; t++) //Loop through every timestep
{
RK_45(inits,tstep,n,bodies,joints,Y); //Find the solution at that timestep
for(int i = 0; i<2*n;i++) //Loop through the solution
{
inits[i]=Y[i]; //Use the solution as the initial conditions for the next timestep
//myfile << inits[i]<<" ";
}
//myfile << "\n";
}
cudaEventRecord( endEvent, 0 );
cudaEventSynchronize( endEvent );
float timeValue;
cudaEventElapsedTime( &timeValue, beginEvent, endEvent );
timedata<< timeValue << " ";
numbods<<n<<" ";
if ( cudaSuccess != cudaGetLastError() )
printf( "Error!\n" );
std::cout << n << std::endl;
//Solution Output
//npy_save_double("Vals.npy",fortran_order,2,shape2,&Vals[0][0]); //Output values to find energy
//Free memory
delete[] inits;
delete[] Y;
delete[] bodies;
delete[] joints;
//myfile.close();
//myfile2.close();
//std::cout<<n<<std::endl;
}
numbods.close();
timedata.close();
return EXIT_SUCCESS; //Program completed successfully
}
|
08aa99b6d94bd3485128dbfe4eda4211b89ea389.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/channel_shuffle_op.h"
#include <array>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
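// Channel shuffle over C = G * K channels: output channel index z (with
// g = z % G, k = z / G) gathers from input channel g * K + k, i.e. the (G, K)
// channel grouping is transposed to (K, G).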
template <typename T, bool kNFirst>
__global__ void ChannelShuffleNCHWKernel(
const int G,
const int K,
const int HxW,
const T* X,
T* Y) {
const int C = G * K;
const int n = kNFirst ? blockIdx.x : blockIdx.y;
const int s = kNFirst ? blockIdx.y : blockIdx.x;
const int g = blockIdx.z % G;
const int k = blockIdx.z / G;
const int offset = s * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (offset < HxW) {
#if __CUDA_ARCH__ >= 350
Y[(n * C + blockIdx.z) * HxW + offset] =
__ldg(X + (n * C + g * K + k) * HxW + offset);
#else
Y[(n * C + blockIdx.z) * HxW + offset] =
X[(n * C + g * K + k) * HxW + offset];
#endif
}
}
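// NHWC variant: one block per (batch, pixel) position; the C contiguous
// channel values are staged in shared memory and written back in shuffled order.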
template <typename T, int kSharedSize>
__global__ void
ChannelShuffleNHWCKernel(const int G, const int K, const float* X, float* Y) {
__shared__ T sdata[kSharedSize];
const int C = G * K;
const int offset = blockIdx.x * C;
for (int i = threadIdx.x; i < C; i += blockDim.x) {
#if __CUDA_ARCH__ >= 350
sdata[i] = __ldg(X + offset + i);
#else
sdata[i] = X[offset + i];
#endif
}
__syncthreads();
for (int i = threadIdx.x; i < C; i += blockDim.x) {
const int g = i % G;
const int k = i / G;
Y[offset + i] = sdata[g * K + k];
}
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, false>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, HxW, X_data, Y_data);
} else {
const dim3 dim_grid(N, S, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, true>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, HxW, X_data, Y_data);
}
return true;
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int ndim = X.ndim();
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int outer_size = N * HxW;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (C <= 32) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 32>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, X_data, Y_data);
} else if (C <= 128) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 128>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, X_data, Y_data);
} else if (C <= 512) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 512>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, X_data, Y_data);
} else {
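    // Wide channel counts: fall back to a generic (N*HxW, G, K) -> (N*HxW, K, G) transpose.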
const std::array<int, 3> dims = {N * HxW, G, K};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), X_data, Y_data, &context_);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int N = dY.dim32(0);
const int C = dY.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
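  // The backward pass is the inverse shuffle: reuse the forward kernel with the
  // roles of G and K swapped.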
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, false>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, HxW, dY_data, dX_data);
} else {
const dim3 dim_grid(N, S, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, true>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, HxW, dY_data, dX_data);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int ndim = dY.ndim();
const int N = dY.dim32(0);
const int C = dY.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int outer_size = N * HxW;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
if (C <= 32) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 32>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, dY_data, dX_data);
} else if (C <= 128) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 128>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, dY_data, dX_data);
} else if (C <= 512) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 512>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, dY_data, dX_data);
} else {
const std::array<int, 3> dims = {N * HxW, K, G};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), dY_data, dX_data, &context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(ChannelShuffle, ChannelShuffleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ChannelShuffleGradient,
ChannelShuffleGradientOp<float, CUDAContext>);
} // namespace caffe2
| 08aa99b6d94bd3485128dbfe4eda4211b89ea389.cu | #include "caffe2/operators/channel_shuffle_op.h"
#include <array>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
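// Channel shuffle: output channel z = k * G + g reads input channel g * K + k,
// transposing the (G, K) channel grouping.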
template <typename T, bool kNFirst>
__global__ void ChannelShuffleNCHWKernel(
const int G,
const int K,
const int HxW,
const T* X,
T* Y) {
const int C = G * K;
const int n = kNFirst ? blockIdx.x : blockIdx.y;
const int s = kNFirst ? blockIdx.y : blockIdx.x;
const int g = blockIdx.z % G;
const int k = blockIdx.z / G;
const int offset = s * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (offset < HxW) {
#if __CUDA_ARCH__ >= 350
Y[(n * C + blockIdx.z) * HxW + offset] =
__ldg(X + (n * C + g * K + k) * HxW + offset);
#else
Y[(n * C + blockIdx.z) * HxW + offset] =
X[(n * C + g * K + k) * HxW + offset];
#endif
}
}
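// NHWC variant: each block stages the C channel values of one pixel in shared
// memory and writes them back permuted.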
template <typename T, int kSharedSize>
__global__ void
ChannelShuffleNHWCKernel(const int G, const int K, const float* X, float* Y) {
__shared__ T sdata[kSharedSize];
const int C = G * K;
const int offset = blockIdx.x * C;
for (int i = threadIdx.x; i < C; i += blockDim.x) {
#if __CUDA_ARCH__ >= 350
sdata[i] = __ldg(X + offset + i);
#else
sdata[i] = X[offset + i];
#endif
}
__syncthreads();
for (int i = threadIdx.x; i < C; i += blockDim.x) {
const int g = i % G;
const int k = i / G;
Y[offset + i] = sdata[g * K + k];
}
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
ChannelShuffleNCHWKernel<float, false>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, HxW, X_data, Y_data);
} else {
const dim3 dim_grid(N, S, C);
ChannelShuffleNCHWKernel<float, true>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, HxW, X_data, Y_data);
}
return true;
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int ndim = X.ndim();
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int outer_size = N * HxW;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (C <= 32) {
ChannelShuffleNHWCKernel<float, 32>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, X_data, Y_data);
} else if (C <= 128) {
ChannelShuffleNHWCKernel<float, 128>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, X_data, Y_data);
} else if (C <= 512) {
ChannelShuffleNHWCKernel<float, 512>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, X_data, Y_data);
} else {
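    // Generic transpose fallback for large C.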
const std::array<int, 3> dims = {N * HxW, G, K};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), X_data, Y_data, &context_);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int N = dY.dim32(0);
const int C = dY.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
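  // Backward pass = inverse shuffle: the forward kernel is launched with G and K swapped.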
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
ChannelShuffleNCHWKernel<float, false>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, HxW, dY_data, dX_data);
} else {
const dim3 dim_grid(N, S, C);
ChannelShuffleNCHWKernel<float, true>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, HxW, dY_data, dX_data);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int ndim = dY.ndim();
const int N = dY.dim32(0);
const int C = dY.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int outer_size = N * HxW;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
if (C <= 32) {
ChannelShuffleNHWCKernel<float, 32>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, dY_data, dX_data);
} else if (C <= 128) {
ChannelShuffleNHWCKernel<float, 128>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, dY_data, dX_data);
} else if (C <= 512) {
ChannelShuffleNHWCKernel<float, 512>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, dY_data, dX_data);
} else {
const std::array<int, 3> dims = {N * HxW, K, G};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), dY_data, dX_data, &context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(ChannelShuffle, ChannelShuffleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ChannelShuffleGradient,
ChannelShuffleGradientOp<float, CUDAContext>);
} // namespace caffe2
|
7e9fcd686a2b4d00e83cdae5909673aa48b173ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "RecurrentWeightsRTRLDerivativesKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *previousHiddenActivations = NULL;
hipMalloc(&previousHiddenActivations, XSIZE*YSIZE);
float *hiddenActivationDerivatives = NULL;
hipMalloc(&hiddenActivationDerivatives, XSIZE*YSIZE);
float *recurrentWeights = NULL;
hipMalloc(&recurrentWeights, XSIZE*YSIZE);
float *recurrentWeightRTRLDerivatives = NULL;
hipMalloc(&recurrentWeightRTRLDerivatives, XSIZE*YSIZE);
float *previousRecurrentWeightRTRLDerivatives = NULL;
hipMalloc(&previousRecurrentWeightRTRLDerivatives, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
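// round the launch sizes up to multiples of the block dimensions so the grid covers the whole matrix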
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
RecurrentWeightsRTRLDerivativesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, previousHiddenActivations,hiddenActivationDerivatives,recurrentWeights,recurrentWeightRTRLDerivatives,previousRecurrentWeightRTRLDerivatives);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
RecurrentWeightsRTRLDerivativesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, previousHiddenActivations,hiddenActivationDerivatives,recurrentWeights,recurrentWeightRTRLDerivatives,previousRecurrentWeightRTRLDerivatives);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
RecurrentWeightsRTRLDerivativesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, previousHiddenActivations,hiddenActivationDerivatives,recurrentWeights,recurrentWeightRTRLDerivatives,previousRecurrentWeightRTRLDerivatives);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7e9fcd686a2b4d00e83cdae5909673aa48b173ce.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "RecurrentWeightsRTRLDerivativesKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *previousHiddenActivations = NULL;
cudaMalloc(&previousHiddenActivations, XSIZE*YSIZE);
float *hiddenActivationDerivatives = NULL;
cudaMalloc(&hiddenActivationDerivatives, XSIZE*YSIZE);
float *recurrentWeights = NULL;
cudaMalloc(&recurrentWeights, XSIZE*YSIZE);
float *recurrentWeightRTRLDerivatives = NULL;
cudaMalloc(&recurrentWeightRTRLDerivatives, XSIZE*YSIZE);
float *previousRecurrentWeightRTRLDerivatives = NULL;
cudaMalloc(&previousRecurrentWeightRTRLDerivatives, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
RecurrentWeightsRTRLDerivativesKernel<<<gridBlock,threadBlock>>>(previousHiddenActivations,hiddenActivationDerivatives,recurrentWeights,recurrentWeightRTRLDerivatives,previousRecurrentWeightRTRLDerivatives);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
RecurrentWeightsRTRLDerivativesKernel<<<gridBlock,threadBlock>>>(previousHiddenActivations,hiddenActivationDerivatives,recurrentWeights,recurrentWeightRTRLDerivatives,previousRecurrentWeightRTRLDerivatives);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
RecurrentWeightsRTRLDerivativesKernel<<<gridBlock,threadBlock>>>(previousHiddenActivations,hiddenActivationDerivatives,recurrentWeights,recurrentWeightRTRLDerivatives,previousRecurrentWeightRTRLDerivatives);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
71eb01c0d5f4daed22357784c7118674f3ed08f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include "../NDArray.h"
#include "../NDArrayFactory.h"
#include "NativeOpExecutioner.h"
#include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h>
#include <ops.h>
#include <ops/gemm.h>
#include <pointercast.h>
#include <stdexcept>
#include <memory>
#include <helpers/logger.h>
#include <loops/pairwise_transform.h>
#include <loops/transform_same.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <helpers/ShapeUtils.h>
#include <sstream>
#include <helpers/ArrayUtils.h>
#include <MmulHelper.h>
#include <helpers/threshold.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <specials_cuda.h>
#include <loops/special_kernels.h>
#include <PointersManager.h>
#include "../NDArray.hpp"
#include <ConstantShapeHelper.h>
namespace nd4j {
void* NDArray::platformBuffer() { return specialBuffer(); }
void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); }
Nd4jLong* NDArray::getPlatformShapeInfo() const { return getSpecialShapeInfo(); }
Nd4jLong* NDArray::platformShapeInfo() { return specialShapeInfo(); }
void NDArray::syncToDevice() const { _buffer->syncToSpecial(); }
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const { _buffer->writePrimary(); _buffer->readSpecial(); }
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const { if(!isActualOnHostSide()) syncToHost(); if(!isActualOnDeviceSide()) syncToDevice(); }
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void fillAsTriangularCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const T val, const int lower, const int upper) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int zRank, xRank, areSameOffsets; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), i, zLen, coords);
const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank);
// if( (row + upper < col) || (row + lower > col) )
if((coords[zRank - 2] + upper < coords[zRank - 1]) || (coords[zRank - 2] + lower > coords[zRank - 1]))
z[zOffset] = val;
else if(vx != vz) { // when x and z are different arrays
if(xRank != zRank)
coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, const char direction, NDArray* target) {
if (isS())
throw std::runtime_error("NDArray::fillAsTriangular: you can't use this method on String array!");
if(target == nullptr)
target = this;
if(!isSameShape(target) && !(rankOf() == 1 && target->rankOf() == 2 && sizeAt(0) == target->sizeAt(0) && sizeAt(0) == target->sizeAt(1)))
        throw std::runtime_error("NDArray::fillAsTriangular method: wrong shape of target array !");
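    // 'u' disables the lower-band test below (only elements with col > row + upper are overwritten);
    // 'l' disables the upper-band test (only elements with col < row + lower are overwritten).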
if (direction == 'u')
lower = -target->sizeAt(-2);
else if (direction == 'l')
upper = target->sizeAt(-1);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*target->getShapeInfo())) * target->rankOf() + 128;
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({target}, {this});
hipLaunchKernelGGL(( fillAsTriangularCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), target->getPlatformBuffer(), target->getPlatformShapeInfo(), static_cast<T>(val), lower, upper);
NDArray::registerSpecialUse({target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void NDArray::fillAsTriangular, (const float val, int lower, int upper, const char direction, NDArray* target), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void identityMatrixCuda(void* vx, const Nd4jLong* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ int rank;
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(rank, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), i, len, coords);
const auto offset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, rank);
if(coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val) {
hipLaunchKernelGGL(( identityMatrixCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS())
throw std::runtime_error("NDArray::setIdentity: you can't use this method on String array!");
// if (rankOf() != 2)
// throw std::runtime_error("NDArray::setIdentity: method should work only for 2D tensors. But " + toStringValue(rankOf()) + " was given.");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(getShapeInfo())) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), 1.f), LIBND4J_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
        throw std::runtime_error("NDArray::swapUnsafe method: both arrays must have the same data type");
if(specialBuffer() == nullptr || other.specialBuffer() == nullptr)
throw std::runtime_error("NDArray::swapUnsafe method: input array should not be empty!");
if(lengthOf() != other.lengthOf())
throw std::runtime_error("NDArray::swapUnsafe method: input arrays should have the same length!");
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe, (specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(), getContext()->getCudaStream()), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = hipStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0)
throw std::runtime_error(msg + std::string(": synchronization failed !"));
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables)
a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables)
a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
hipMemcpy(getSpecialShapeInfo(), getShapeInfo(), shape::shapeInfoByteLength(getShapeInfo()), hipMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void* NDArray::specialBufferWithOffset(Nd4jLong offset) const {
return getSpecialBuffer() != nullptr ? static_cast<int8_t*>(getSpecialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
NDArray NDArray::tile(const std::vector<Nd4jLong>& reps) const {
int dim = reps.size();
int product = 1;
for(const auto& item : reps)
product *= item;
if(product == 0)
throw std::runtime_error("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if(product==1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if(diff < 0) { // reshape to higher dimension
std::vector<Nd4jLong> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.getShapeInfo()+1, rankOld * sizeof(Nd4jLong)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
// create new buffer, in any case the memory amount new buffer points to is bigger then those for old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(), dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
NDArray result(newBuff, ShapeDescriptor(newShapeInfo), getContext());
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() goes automatically by means of getSubArrayIndex applying
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH, (this->getSpecialBuffer(), this->getSpecialShapeInfo(), result.getSpecialBuffer(), result.getSpecialShapeInfo(), resultLen, stream), LIBND4J_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<Nd4jLong>& reps, NDArray& target) const {
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if(!shape::equalsSoft(newShapeInfo, target.getShapeInfo())) {
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() goes automatically by means of getSubArrayIndex applying
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(target.dataType(), dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if(rankOf() > target.rankOf())
throw std::runtime_error("NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if(!ShapeUtils::areShapesBroadcastable(*this, target))
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() goes automatically by means of getSubArrayIndex applying
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(target.dataType(), dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by reps
NDArray* NDArray::repeat(int dimension, const std::vector<Nd4jLong>& repeats) const {
auto outShape = ShapeUtils::evalRepeatShape(dimension, repeats, *this);
// the size of outShape == rank
int rank = rankOf(); // = outShape.size()
std::vector<Nd4jLong> newShape(rank);
for (int i = 0; i < rank; i++)
newShape[i] = outShape[i];
auto ret = new NDArray('c', outShape, dataType(), getContext());
auto repeatDelta = shape::prodLong(newShape.data(), rank) / this->lengthOf();
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rankOf(), {dimension});
const Nd4jLong numTads = ShapeUtils::getNumOfSubArrs(getShapeInfo(), dimsToExclude); //this->tensorsAlongDimension({dimension});
std::vector<int> copy({dimension});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(this->getShapeInfo(), copy);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(ret->getShapeInfo(), copy);
prepareSpecialUse({ret}, {this});
auto stream = getContext()->getCudaStream();
BUILD_SINGLE_SELECTOR(dataType(), repeatKernelH, (getSpecialBuffer(), ret->getSpecialBuffer(), numTads, lengthOf(), ret->lengthOf(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), *stream), LIBND4J_TYPES);
registerSpecialUse({ret}, {this});
return ret;
}
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by reps
void NDArray::repeat(int dimension, NDArray& target) const {
if(dimension < 0)
dimension += rankOf();
if(rankOf() != target.rankOf())
        throw std::invalid_argument("NDArray::repeat(int dimension, NDArray& target) method: wrong rank of target array, it must be equal to this array rank!");
Nd4jLong repeatDelta = target.sizeAt(dimension) / sizeAt(dimension);
if(repeatDelta == 0)
throw std::invalid_argument("NDArray::repeat(int dimension, NDArray& target) method: wrong shape of target array!");
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rankOf(), {dimension});
const Nd4jLong numTads = ShapeUtils::getNumOfSubArrs(getShapeInfo(), dimsToExclude);
std::vector<int> copy({dimension});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(this->getShapeInfo(), copy);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(target.getShapeInfo(), copy);
NDArray::prepareSpecialUse({&target}, {this});
auto stream = getContext()->getCudaStream();
BUILD_DOUBLE_SELECTOR(target.dataType(), dataType(), repeatKernelHH, (getSpecialBuffer(), target.getSpecialBuffer(), numTads, lengthOf(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), *stream), LIBND4J_TYPES, LIBND4J_TYPES);
NDArray::registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void* NDArray::getSpecialBuffer() const {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if(_length == 0)
{ printf("NDArray::printActualBuffer: array length is zero !\n"); return; }
if(msg)
printf("%s", msg);
if(host) {
if(getBuffer() == nullptr || _length == 0)
{ printf("NDArray::printActualBuffer: host buffer is nullptr !\n"); return; }
const T* buff = bufferAsT<T>();
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
}
else {
if(getSpecialBuffer() == nullptr || _length == 0)
{ printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n"); return; }
void* pHost = operator new(sizeof(T) * _length);
if (ews() != 1) {
            for (uint i = 0; i < _length; i++)  // map the logical index i to its strided offset, matching the host branch above
                hipMemcpyAsync(reinterpret_cast<T*>(pHost) + i, specialBufferWithOffset(getOffset(i)), sizeof(T), hipMemcpyDeviceToHost, *(getContext()->getCudaStream()));
}
else
hipMemcpyAsync(pHost, getSpecialBuffer(), sizeOfT() * _length, hipMemcpyDeviceToHost, *getContext()->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*getContext()->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("NDArray::printSpecialBuffer: hipStreamSynchronize failed!");
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host,const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
#if defined(__HIPCC__) && !defined(BUILD_TESTS)
//#include <cpu/NDArrayLambda.hpp>
#endif
} // end namespace nd4j
#endif
| 71eb01c0d5f4daed22357784c7118674f3ed08f7.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include "../NDArray.h"
#include "../NDArrayFactory.h"
#include "NativeOpExecutioner.h"
#include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h>
#include <ops.h>
#include <ops/gemm.h>
#include <pointercast.h>
#include <stdexcept>
#include <memory>
#include <helpers/logger.h>
#include <loops/pairwise_transform.h>
#include <loops/transform_same.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <helpers/ShapeUtils.h>
#include <sstream>
#include <helpers/ArrayUtils.h>
#include <MmulHelper.h>
#include <helpers/threshold.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <specials_cuda.h>
#include <loops/special_kernels.h>
#include <PointersManager.h>
#include "../NDArray.hpp"
#include <ConstantShapeHelper.h>
namespace nd4j {
void* NDArray::platformBuffer() { return specialBuffer(); }
void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); }
Nd4jLong* NDArray::getPlatformShapeInfo() const { return getSpecialShapeInfo(); }
Nd4jLong* NDArray::platformShapeInfo() { return specialShapeInfo(); }
void NDArray::syncToDevice() const { _buffer->syncToSpecial(); }
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const { _buffer->writePrimary(); _buffer->readSpecial(); }
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const { if(!isActualOnHostSide()) syncToHost(); if(!isActualOnDeviceSide()) syncToDevice(); }
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void fillAsTriangularCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const T val, const int lower, const int upper) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int zRank, xRank, areSameOffsets; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), i, zLen, coords);
const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank);
// if( (row + upper < col) || (row + lower > col) )
if((coords[zRank - 2] + upper < coords[zRank - 1]) || (coords[zRank - 2] + lower > coords[zRank - 1]))
z[zOffset] = val;
else if(vx != vz) { // when x and z are different arrays
if(xRank != zRank)
coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, const char direction, NDArray* target) {
if (isS())
throw std::runtime_error("NDArray::fillAsTriangular: you can't use this method on String array!");
if(target == nullptr)
target = this;
if(!isSameShape(target) && !(rankOf() == 1 && target->rankOf() == 2 && sizeAt(0) == target->sizeAt(0) && sizeAt(0) == target->sizeAt(1)))
        throw std::runtime_error("NDArray::fillAsTriangular method: wrong shape of target array !");
if (direction == 'u')
lower = -target->sizeAt(-2);
else if (direction == 'l')
upper = target->sizeAt(-1);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*target->getShapeInfo())) * target->rankOf() + 128;
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({target}, {this});
fillAsTriangularCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *getContext()->getCudaStream()>>>(getPlatformBuffer(), getPlatformShapeInfo(), target->getPlatformBuffer(), target->getPlatformShapeInfo(), static_cast<T>(val), lower, upper);
NDArray::registerSpecialUse({target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void NDArray::fillAsTriangular, (const float val, int lower, int upper, const char direction, NDArray* target), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void identityMatrixCuda(void* vx, const Nd4jLong* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ int rank;
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(rank, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), i, len, coords);
const auto offset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, rank);
if(coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val) {
identityMatrixCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS())
throw std::runtime_error("NDArray::setIdentity: you can't use this method on String array!");
// if (rankOf() != 2)
// throw std::runtime_error("NDArray::setIdentity: method should work only for 2D tensors. But " + toStringValue(rankOf()) + " was given.");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(getShapeInfo())) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), 1.f), LIBND4J_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
        throw std::runtime_error("NDArray::swapUnsafe method: both arrays must have the same data type");
if(specialBuffer() == nullptr || other.specialBuffer() == nullptr)
throw std::runtime_error("NDArray::swapUnsafe method: input array should not be empty!");
if(lengthOf() != other.lengthOf())
throw std::runtime_error("NDArray::swapUnsafe method: input arrays should have the same length!");
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe, (specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(), getContext()->getCudaStream()), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = cudaStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0)
throw std::runtime_error(msg + std::string(": synchronization failed !"));
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables)
a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables)
a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
cudaMemcpy(getSpecialShapeInfo(), getShapeInfo(), shape::shapeInfoByteLength(getShapeInfo()), cudaMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void* NDArray::specialBufferWithOffset(Nd4jLong offset) const {
return getSpecialBuffer() != nullptr ? static_cast<int8_t*>(getSpecialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
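// For instance (illustrative note added), a 2x3 array tiled with reps = {2,2} yields a 4x6 array; if reps has more entries than the array has dimensions, the result is first reshaped to the higher rank.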
NDArray NDArray::tile(const std::vector<Nd4jLong>& reps) const {
int dim = reps.size();
int product = 1;
for(const auto& item : reps)
product *= item;
if(product == 0)
throw std::runtime_error("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if(product==1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if(diff < 0) { // reshape to higher dimension
std::vector<Nd4jLong> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.getShapeInfo()+1, rankOld * sizeof(Nd4jLong)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
// create new buffer, in any case the memory amount the new buffer points to is bigger than that of the old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(), dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
NDArray result(newBuff, ShapeDescriptor(newShapeInfo), getContext());
// fill newBuff by looping through all elements of newBuff
// the corresponding indices into getBuffer() are obtained automatically via getSubArrayIndex
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH, (this->getSpecialBuffer(), this->getSpecialShapeInfo(), result.getSpecialBuffer(), result.getSpecialShapeInfo(), resultLen, stream), LIBND4J_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<Nd4jLong>& reps, NDArray& target) const {
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if(!shape::equalsSoft(newShapeInfo, target.getShapeInfo())) {
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill newBuff by looping through all elements of newBuff
// the corresponding indices into getBuffer() are obtained automatically via getSubArrayIndex
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(target.dataType(), dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if(rankOf() > target.rankOf())
throw std::runtime_error("NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if(!ShapeUtils::areShapesBroadcastable(*this, target))
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill newBuff by looping through all elements of newBuff
// the corresponding indices into getBuffer() are obtained automatically via getSubArrayIndex
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(target.dataType(), dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by reps
NDArray* NDArray::repeat(int dimension, const std::vector<Nd4jLong>& repeats) const {
auto outShape = ShapeUtils::evalRepeatShape(dimension, repeats, *this);
// the size of outShape == rank
int rank = rankOf(); // = outShape.size()
std::vector<Nd4jLong> newShape(rank);
for (int i = 0; i < rank; i++)
newShape[i] = outShape[i];
auto ret = new NDArray('c', outShape, dataType(), getContext());
auto repeatDelta = shape::prodLong(newShape.data(), rank) / this->lengthOf();
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rankOf(), {dimension});
const Nd4jLong numTads = ShapeUtils::getNumOfSubArrs(getShapeInfo(), dimsToExclude); //this->tensorsAlongDimension({dimension});
std::vector<int> copy({dimension});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(this->getShapeInfo(), copy);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(ret->getShapeInfo(), copy);
prepareSpecialUse({ret}, {this});
auto stream = getContext()->getCudaStream();
BUILD_SINGLE_SELECTOR(dataType(), repeatKernelH, (getSpecialBuffer(), ret->getSpecialBuffer(), numTads, lengthOf(), ret->lengthOf(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), *stream), LIBND4J_TYPES);
registerSpecialUse({ret}, {this});
return ret;
}
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by reps
void NDArray::repeat(int dimension, NDArray& target) const {
if(dimension < 0)
dimension += rankOf();
if(rankOf() != target.rankOf())
throw std::invalid_argument("NDArray::repeat(int dimension, NDArray& target) method: wrong rank of target array it must be equal to this array rank!");
Nd4jLong repeatDelta = target.sizeAt(dimension) / sizeAt(dimension);
if(repeatDelta == 0)
throw std::invalid_argument("NDArray::repeat(int dimension, NDArray& target) method: wrong shape of target array!");
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rankOf(), {dimension});
const Nd4jLong numTads = ShapeUtils::getNumOfSubArrs(getShapeInfo(), dimsToExclude);
std::vector<int> copy({dimension});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(this->getShapeInfo(), copy);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(target.getShapeInfo(), copy);
NDArray::prepareSpecialUse({&target}, {this});
auto stream = getContext()->getCudaStream();
BUILD_DOUBLE_SELECTOR(target.dataType(), dataType(), repeatKernelHH, (getSpecialBuffer(), target.getSpecialBuffer(), numTads, lengthOf(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), *stream), LIBND4J_TYPES, LIBND4J_TYPES);
NDArray::registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void* NDArray::getSpecialBuffer() const {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if(_length == 0)
{ printf("NDArray::printActualBuffer: array length is zero !\n"); return; }
if(msg)
printf("%s", msg);
if(host) {
if(getBuffer() == nullptr || _length == 0)
{ printf("NDArray::printActualBuffer: host buffer is nullptr !\n"); return; }
const T* buff = bufferAsT<T>();
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
}
else {
if(getSpecialBuffer() == nullptr || _length == 0)
{ printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n"); return; }
void* pHost = operator new(sizeof(T) * _length);
if (ews() != 1) {
for (uint i = 0; i < _length; i++)
cudaMemcpyAsync(reinterpret_cast<T*>(pHost) + i, specialBufferWithOffset(i), sizeof(T), cudaMemcpyDeviceToHost, *(getContext()->getCudaStream()));
}
else
cudaMemcpyAsync(pHost, getSpecialBuffer(), sizeOfT() * _length, cudaMemcpyDeviceToHost, *getContext()->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*getContext()->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("NDArray::printSpecialBuffer: cudaStreamSynchronize failed!");
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host,const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
#if defined(__CUDACC__) && !defined(BUILD_TESTS)
//#include <cpu/NDArrayLambda.hpp>
#endif
} // end namespace nd4j
#endif
|
3eae33b3b86416b2ad464079bd2504555ee1459c.hip | // !!! This is a file automatically generated by hipify!!!
///This program computes the parallelized version of the FFT_DIF_DIT_TD algorithm
///(29/08/2016)
///This version is used to find, in MATLAB, the required number of iterations, considering (RADIX-3) N = 729, Li = {3,9,27,...,729} and Lo = N
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Stores the factors of N
int svF; //Stores the number of factors of N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////INPUT DATA///////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Number of elements of the input vector
/// Li >>> Number of non-zero input elements
/// Lo >>> Number of required output elements
/// loop >>> Number of iterations
/// muestras >>> Number of samples
//////////////////////////////////////////////////////////////////////////
///////////////////////////OUTPUT DATA/////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Output vector
//////////////////////////////////////////////////////////////////////////
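/// Illustrative example (derived from the constants set below, not part of the original header): with
/// N = 729, Li = 3 and Lo = N, the code later factors N as Dip*Dop*P, aiming for Dip approx. N/Li = 243
/// and Dop approx. N/Lo = 1, which gives P = N/(Dip*Dop) = 3.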
/////////////////// THE INPUT DATA ARE ENTERED HERE ///////////////////////
//////////////////////////////////////////////////////////////////////////
///Enter the required number of iterations
const int loop = 500;
///Enter the required number of samples
const int muestras = 500;
///Enter the value of N_max
const int N_max = 6;
///Enter the value of Lo_max
const int Lo_max = N_max;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Funcin principal
int main()
{
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma,suma_acomulada;
float promedio[muestras];
float tiempos_acomulado[loop];
float promedio_acomulado[loop];
FILE *da;
FILE *db;
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
//Inicializaciones
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(3,i_N);
printf("\n N = %d \n",N);
for(j_res=1;j_res <= N_max;j_res++)
{
Li=(int )pow(3,j_res);
for(k_res=(int )pow(3,Lo_max);k_res <= (int )pow(3,Lo_max);k_res++)
{
Lo=k_res;
printf("\n Li = %d Lo = %d",Li,Lo);
switch (j_res)
{
case 1:
///Se crean los archivos binarios donde se guardarn los datos
da = fopen("Acomulados_N729_Li3_LoN.bin","a+b");
db = fopen("Promedios_N729_Li3_LoN.bin","a+b");
break;
case 2:
///Se crean los archivos binarios donde se guardarn los datos
da = fopen("Acomulados_N729_Li9_LoN.bin","a+b");
db = fopen("Promedios_N729_Li9_LoN.bin","a+b");
break;
case 3:
///Se crean los archivos binarios donde se guardarn los datos
da = fopen("Acomulados_N729_Li27_LoN.bin","a+b");
db = fopen("Promedios_N729_Li27_LoN.bin","a+b");
break;
case 4:
///Se crean los archivos binarios donde se guardarn los datos
da = fopen("Acomulados_N729_Li81_LoN.bin","a+b");
db = fopen("Promedios_N729_Li81_LoN.bin","a+b");
break;
case 5:
///Se crean los archivos binarios donde se guardarn los datos
da = fopen("Acomulados_N729_Li243_LoN.bin","a+b");
db = fopen("Promedios_N729_Li243_LoN.bin","a+b");
break;
case 6:
///Se crean los archivos binarios donde se guardarn los datos
da = fopen("Acomulados_N729_Li729_LoN.bin","a+b");
db = fopen("Promedios_N729_Li729_LoN.bin","a+b");
break;
default:
break;
}
for(i=1;i<=muestras;i++)
{
suma=0.0;
cont = 0;
suma_acomulada = 0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Clculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
hipEventRecord(start_app,0);
//Funcin auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Funcin auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Funcin auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
if(i == 1)
{
tiempos_acomulado[cont] = elapsedTime_app;
for(i_prom = 0; i_prom <= cont; i_prom++)
{
suma_acomulada = suma_acomulada + tiempos_acomulado[i_prom];
}
promedio_acomulado[cont] = suma_acomulada/(float)(cont+1);
cont++;
suma_acomulada = 0.0;
}
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
promedio[i-1] = suma/(float)loop;
fwrite(promedio_acomulado,sizeof(float),loop,da);
fclose(da);
}
fwrite(promedio,sizeof(float),muestras,db);
fclose(db);
//printf(" \n\n%d - Tiempo promedio acomulado para N = %ld >>> %f mS\n",i,N,promedio_acomulado[499]);
//printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[0]);
}
}
}
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//This function generates the input vector x[n]
void vector_entrada_xn(int Li)
{
//Declaracin de variables locales
int k;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
}
//This function generates the array W
void arreglo_W(int N)
{
//Declaracin de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
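// Note (added): W_host[n-1] = cos(2*pi*n/N) - j*sin(2*pi*n/N) = e^{-j*2*pi*n/N} for n = 1..N,
// i.e. the DFT twiddle factors stored with a 1-based offset (the kernels index them as W[(...) % N - 1]).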
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//This function generates the factors Dip and Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaracin de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el nmero de factores de "N"
factor(N);
/*
Stores in the vector Nh the distinct factors taken from the vector vF.
The vector k stores how many times each element
stored in Nh is repeated.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Stores (in Prod) all possible factor combinations whose product equals N.
t stores the number of elements generated.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
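// Selection criterion (summary added): Dip and Dop are chosen among the divisor pairs of N so that
// (Dip,Dop) minimizes the Euclidean distance to the ideal pair (N/Li, N/Lo). For N = 729, Li = 3,
// Lo = 729 this should yield Dip = 243 and Dop = 1 (distance 0), hence P = N/(Dip*Dop) = 3.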
//This function finds the factors of "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
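// Example (added): factor(729) fills vF = {3,3,3,3,3,3} and sets svF = 6, since 729 = 3^6.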
//This function finds all possible combinations of factors whose product equals "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
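// Example (added): with Nh = {3} and k = {6} (i.e. N = 729), product() generates
// Prod = {1, 3, 9, 27, 81, 243, 729} and sets a = 7.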
//Host helper function that runs the input stage on the device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,n1,n2;
//Asignacin de memoria en el device para el arreglo "x_device"
hipMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignacin de memoria en el device para el arreglo "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
hipMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Envo de los arreglos W hacia la memoria global del device
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Asignacin de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la funcin kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Kernel function that executes the input stage on the device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generacin de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Host helper function that runs the intermediate stage on the device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignacin de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignacin de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignacin de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
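// Batched FFT (note added): this single plan runs Dip*Dop independent P-point C2C transforms;
// each transform is contiguous (stride 1) and consecutive transforms are P elements apart (idist = odist = P).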
//Ejecucin del plan
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Host helper function that runs the output stage on the device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int m;
//Asignacin de memoria en el device para "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignacin de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la funcin kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//Kernel function that executes the output stage on the device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaracin de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Clculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Clculo de X(k) para 0<=k<=Dip-1.
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el mtodo directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el mtodo filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
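// Note (added): the recurrence below, built from 2*Re(W) with a final correction by conj(W),
// appears to implement a Goertzel-style second-order filter; t1..t5 hold the filter state.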
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
| 3eae33b3b86416b2ad464079bd2504555ee1459c.cu | ///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(29/08/2016)
///Ésta versión sirve para encontrar en matlab el número de iteraciones necesario, considerando (RADIX-3) N = 729, Li = {3,9,27,...,729} y Lo = N
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 500;
///Ingrese el número de muestras requeridas
const int muestras = 500;
///Ingrese el valor de N_max
const int N_max = 6;
///Ingrese el valor de Li_max
const int Lo_max = N_max;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma,suma_acomulada;
float promedio[muestras];
float tiempos_acomulado[loop];
float promedio_acomulado[loop];
FILE *da;
FILE *db;
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
//Inicializaciones
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(3,i_N);
printf("\n N = %d \n",N);
for(j_res=1;j_res <= N_max;j_res++)
{
Li=(int )pow(3,j_res);
for(k_res=(int )pow(3,Lo_max);k_res <= (int )pow(3,Lo_max);k_res++)
{
Lo=k_res;
printf("\n Li = %d Lo = %d",Li,Lo);
switch (j_res)
{
case 1:
///Se crean los archivos binarios donde se guardarán los datos
da = fopen("Acomulados_N729_Li3_LoN.bin","a+b");
db = fopen("Promedios_N729_Li3_LoN.bin","a+b");
break;
case 2:
///Se crean los archivos binarios donde se guardarán los datos
da = fopen("Acomulados_N729_Li9_LoN.bin","a+b");
db = fopen("Promedios_N729_Li9_LoN.bin","a+b");
break;
case 3:
///Se crean los archivos binarios donde se guardarán los datos
da = fopen("Acomulados_N729_Li27_LoN.bin","a+b");
db = fopen("Promedios_N729_Li27_LoN.bin","a+b");
break;
case 4:
///Se crean los archivos binarios donde se guardarán los datos
da = fopen("Acomulados_N729_Li81_LoN.bin","a+b");
db = fopen("Promedios_N729_Li81_LoN.bin","a+b");
break;
case 5:
///Se crean los archivos binarios donde se guardarán los datos
da = fopen("Acomulados_N729_Li243_LoN.bin","a+b");
db = fopen("Promedios_N729_Li243_LoN.bin","a+b");
break;
case 6:
///Se crean los archivos binarios donde se guardarán los datos
da = fopen("Acomulados_N729_Li729_LoN.bin","a+b");
db = fopen("Promedios_N729_Li729_LoN.bin","a+b");
break;
default:
break;
}
for(i=1;i<=muestras;i++)
{
suma=0.0;
cont = 0;
suma_acomulada = 0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
if(i == 1)
{
tiempos_acomulado[cont] = elapsedTime_app;
for(i_prom = 0; i_prom <= cont; i_prom++)
{
suma_acomulada = suma_acomulada + tiempos_acomulado[i_prom];
}
promedio_acomulado[cont] = suma_acomulada/(float)(cont+1);
cont++;
suma_acomulada = 0.0;
}
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
promedio[i-1] = suma/(float)loop;
fwrite(promedio_acomulado,sizeof(float),loop,da);
fclose(da);
}
fwrite(promedio,sizeof(float),muestras,db);
fclose(db);
//printf(" \n\n%d - Tiempo promedio acomulado para N = %ld >>> %f mS\n",i,N,promedio_acomulado[499]);
//printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[0]);
}
}
}
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaración de variables locales
int k;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
cudaMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignación de memoria en el device para el arreglo "y"
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
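// Batched FFT (note added): this single plan runs Dip*Dop independent P-point C2C transforms;
// each transform is contiguous (stride 1) and consecutive transforms are P elements apart (idist = odist = P).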
//Ejecución del plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se destruye el plan
cufftDestroy(plan);
//Se liberan los arreglos "in" y "out"
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
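// Note (added): the recurrence below, built from 2*Re(W) with a final correction by conj(W),
// appears to implement a Goertzel-style second-order filter; t1..t5 hold the filter state.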
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
4c8de5800312ef65a61e44b21c65bb3670535366.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, [email protected]
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _weightTLeftkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg3, float *dev_recon)
{
int ind0, indg[3],q;
int k=blockIdx.x*blockDim.x + threadIdx.x;
if (k>=num_slices)
return;
ind0 = k*num_grid*num_grid;
indg[0] = ind0+1;
indg[1] = ind0+num_grid;
indg[2] = ind0+num_grid+1;
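// Note (added): ind0 indexes the top-left element of slice k; indg[0..2] are its right, lower and
// lower-right neighbours. The loop below adds the wg3 weights to F and subtracts the weighted
// pair sums (recon[ind0] + recon[neighbour]) from G, i.e. the regularization terms for this corner voxel.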
for (q = 0; q < 3; q++) {
dev_F[ind0] += 2*beta*dev_wg3[q];
dev_G[ind0] -= 2*beta*dev_wg3[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
}
} | 4c8de5800312ef65a61e44b21c65bb3670535366.cu | #include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, [email protected]
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _weightTLeftkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg3, float *dev_recon)
{
int ind0, indg[3],q;
int k=blockIdx.x*blockDim.x + threadIdx.x;
if (k>=num_slices)
return;
ind0 = k*num_grid*num_grid;
indg[0] = ind0+1;
indg[1] = ind0+num_grid;
indg[2] = ind0+num_grid+1;
for (q = 0; q < 3; q++) {
dev_F[ind0] += 2*beta*dev_wg3[q];
dev_G[ind0] -= 2*beta*dev_wg3[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
}
} |
a8e2b1768b9b79e230a1c151db1d4836ee8baf54.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// Name : popsicle-stick.cpp
// Author : Matthew Hanley
// Version :
// Copyright :
// Description : Hello World in C++, Ansi-style
//============================================================================
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <limits.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "gpu_err_check.h"
// Number of iterations to run
const int N = 5000000;
// size of grid dim
const int m = 10;
__host__ __device__ double rollingAverage(double cur_avg, double new_sample, int cur_n){
// n is the number of data points prior to new_sample being added
cur_avg = (new_sample + ((double) cur_n) * cur_avg)/(cur_n+1);
return cur_avg;
}
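// Running-mean update (note added): cur_avg_new = (new_sample + cur_n*cur_avg)/(cur_n+1).
// Example: cur_avg = 10.0 over cur_n = 4 samples, new_sample = 20.0 -> (20 + 4*10)/5 = 12.0.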
__global__ void setup_kernel(hiprandState_t *state, unsigned long long seed){
int tid = threadIdx.x+blockDim.x*blockIdx.x;
int stride = tid;
int stride_step = gridDim.x * blockDim.x;
while(stride < N){
hiprand_init(seed, stride, 0, &state[stride]); // use the element index as the subsequence; seeding every strided element with tid would give them identical random streams
stride += stride_step;
}
}
__global__ void monte_carlo_kernel(int* draws, hiprandState_t* state, double* average){
// adding shared memory
extern __shared__ int grid[];
int tid = blockDim.x * blockIdx.x + threadIdx.x; // thread index
// set up striding
int stride = tid; // init stride to current tid
int stride_step = gridDim.x * blockDim.x; // step size is equal to grid size (in unit of threads)
// =====================================================================================================================
// Randomizing the Grid
// =====================================================================================================================
int random_number; // variable to hold random number
int tmp; // variable to hold element for swapping
int j; // variable for finding index to swap with
int start_index;// where the loop should start
// The arrays were generated all at once,
// thus each thread has its own special location.
while(stride < N){ // make sure that computation is needed
hiprandState_t localState = state[stride];
start_index = stride * m * m;
// populate sequential numbers
for(int i = 0; i < m * m; i++){
draws[start_index + i] = i;
}
for(int i = m*m-1; i>=0; i--){
random_number = (int) truncf( hiprand_uniform(&localState)*100000 ); // generate random number and make it big
j = (random_number) % (i+1); // get the index of a number to swap with that is less than i
// perform the swap
tmp = draws[start_index + j];
draws[start_index + j] = draws[start_index + i];
draws[start_index + i] = tmp;
}
stride += stride_step;
}
__syncthreads();
// reset stride to tid
stride = tid;
// =====================================================================================================================
int n = 0;
double local_average = 0;
__syncthreads(); // wait for all threads to catch up.
while (stride < N){
int win = 0;
int draw;
start_index = stride * m * m;
int n_draws = 0;
for (int i = 0; i < m*m; i++){
grid[threadIdx.x * m * m + i] = 0;
}
// simulating stick drawing, a sequential process in this case.
while (win == 0){
draw = draws[start_index + n_draws];
grid[threadIdx.x * m * m + draw] = 1;
int col = draw % m;
int row = draw / m;
int row_count = 0;
int col_count = 0;
for(j=0; j<m; j++){
col_count += grid[threadIdx.x * m * m + j * m + col];
}
for(j=row*m; j<row * m + m; j++){
row_count += grid[threadIdx.x * m * m + j];
}
if (col_count >= m || row_count >= m){
win = 1;
// printf("ROW: %d, COL: %d\n", row, col);
}
n_draws++;
}
n++;
local_average = rollingAverage(local_average, (double) n_draws, n);
stride += stride_step;
}
average[tid] = local_average;
__syncthreads();
}
int main() {
hipFree(0); // avoid spoofing profiler.
clock_t begin = clock();
// srand(time(0));
// Init variables
double* average;
int* d_draws;
double* d_average;
// create and allocate for the random state
hiprandState_t *d_state;
cudaSafeCall( hipMalloc(&d_state, N * sizeof(hiprandState_t)) );
int threads_per_block = 32;
// int n_blocks = (N + threads_per_block - 1) / threads_per_block;
int n_blocks = 900;
// allocate space on host
average = (double*) malloc(threads_per_block * n_blocks * sizeof(double));
// allocate space on the device for lots of popsicle sticks
cudaSafeCall( hipMalloc((void**) &d_draws, m * m * sizeof(int) * N) );
cudaSafeCall( hipMalloc((void**) &d_average, sizeof(double) * threads_per_block * n_blocks) );
cudaCheckError();
hipLaunchKernelGGL(( setup_kernel), dim3(1000), dim3(threads_per_block), 0, 0, d_state, (unsigned long long) time(NULL));
cudaCheckError();
cudaSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( monte_carlo_kernel), dim3(n_blocks), dim3(threads_per_block), m * m * threads_per_block * sizeof(int), 0, d_draws, d_state, d_average);
cudaCheckError();
cudaSafeCall( hipDeviceSynchronize() );
// copy data to host for analysis.
cudaSafeCall( hipMemcpy(average, d_average, threads_per_block * n_blocks * sizeof(double), hipMemcpyDeviceToHost) );
double big_avg = 0;
for (int i = 0; i < threads_per_block * n_blocks; i++){
big_avg = rollingAverage(big_avg, average[i], i);
}
printf("Average: %f\n", big_avg);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("TIME: %f\n", time_spent);
hipFree(d_draws);
return 0;
}
| a8e2b1768b9b79e230a1c151db1d4836ee8baf54.cu | //============================================================================
// Name : popsicle-stick.cpp
// Author : Matthew Hanley
// Version :
// Copyright :
// Description : Monte Carlo estimate of popsicle-stick draws needed to fill a grid row or column
//============================================================================
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <limits.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include "gpu_err_check.h"
// Number of iterations to run
const int N = 5000000;
// size of grid dim
const int m = 10;
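// Each of the N trials shuffles the m*m "popsicle sticks" (Fisher-Yates), then
// draws them in that order, marking cells of an m x m grid until some row or
// column is completely filled; the program reports the average number of draws.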
__host__ __device__ double rollingAverage(double cur_avg, double new_sample, int cur_n){
// n is the number of data points prior to new_sample being added
cur_avg = (new_sample + ((double) cur_n) * cur_avg)/(cur_n+1);
return cur_avg;
}
__global__ void setup_kernel(curandState *state, unsigned long long seed){
int tid = threadIdx.x+blockDim.x*blockIdx.x;
int stride = tid;
int stride_step = gridDim.x * blockDim.x;
while(stride < N){
curand_init(seed, stride, 0, &state[stride]); // one independent sequence per state
stride += stride_step;
}
}
__global__ void monte_carlo_kernel(int* draws, curandState* state, double* average){
// adding shared memory
extern __shared__ int grid[];
int tid = blockDim.x * blockIdx.x + threadIdx.x; // thread index
// set up striding
int stride = tid; // init stride to current tid
int stride_step = gridDim.x * blockDim.x; // step size is equal to grid size (in unit of threads)
// =====================================================================================================================
// Randomizing the Grid
// =====================================================================================================================
int random_number; // variable to hold random number
int tmp; // variable to hold element for swapping
int j; // variable for finding index to swap with
int start_index;// where the loop should start
// The arrays were generated all at once,
// thus each thread has its own special location.
while(stride < N){ // make sure that computation is needed
curandState localState = state[stride];
start_index = stride * m * m;
// populate sequential numbers
for(int i = 0; i < m * m; i++){
draws[start_index + i] = i;
}
for(int i = m*m-1; i>=0; i--){
random_number = (int) truncf( curand_uniform(&localState)*100000 ); // generate random number and make it big
j = (random_number) % (i+1); // get the index of a number to swap with that is less than i
// perform the swap
tmp = draws[start_index + j];
draws[start_index + j] = draws[start_index + i];
draws[start_index + i] = tmp;
}
stride += stride_step;
}
__syncthreads();
// reset stride to tid
stride = tid;
// =====================================================================================================================
int n = 0;
double local_average = 0;
__syncthreads(); // wait for all threads to catch up.
while (stride < N){
int win = 0;
int draw;
start_index = stride * m * m;
int n_draws = 0;
for (int i = 0; i < m*m; i++){
grid[threadIdx.x * m * m + i] = 0;
}
// simulating stick drawing, a sequential process in this case.
while (win == 0){
draw = draws[start_index + n_draws];
grid[threadIdx.x * m * m + draw] = 1;
int col = draw % m;
int row = draw / m;
int row_count = 0;
int col_count = 0;
for(j=0; j<m; j++){
col_count += grid[threadIdx.x * m * m + j * m + col];
}
for(j=row*m; j<row * m + m; j++){
row_count += grid[threadIdx.x * m * m + j];
}
if (col_count >= m || row_count >= m){
win = 1;
// printf("ROW: %d, COL: %d\n", row, col);
}
n_draws++;
}
n++;
local_average = rollingAverage(local_average, (double) n_draws, n);
stride += stride_step;
}
average[tid] = local_average;
__syncthreads();
}
int main() {
cudaFree(0); // avoid spoofing profiler.
clock_t begin = clock();
// srand(time(0));
// Init variables
double* average;
int* d_draws;
double* d_average;
// create and allocate for the random state
curandState *d_state;
cudaSafeCall( cudaMalloc(&d_state, N * sizeof(curandState)) );
int threads_per_block = 32;
// int n_blocks = (N + threads_per_block - 1) / threads_per_block;
int n_blocks = 900;
// allocate space on host
average = (double*) malloc(threads_per_block * n_blocks * sizeof(double));
// allocate space on the device for lots of popsicle sticks
cudaSafeCall( cudaMalloc((void**) &d_draws, m * m * sizeof(int) * N) );
cudaSafeCall( cudaMalloc((void**) &d_average, sizeof(double) * threads_per_block * n_blocks) );
cudaCheckError();
setup_kernel<<<1000, threads_per_block>>>(d_state, (unsigned long long) time(NULL));
cudaCheckError();
cudaSafeCall( cudaDeviceSynchronize() );
monte_carlo_kernel<<<n_blocks, threads_per_block, m * m * threads_per_block * sizeof(int)>>>(d_draws, d_state, d_average);
cudaCheckError();
cudaSafeCall( cudaDeviceSynchronize() );
// copy data to host for analysis.
cudaSafeCall( cudaMemcpy(average, d_average, threads_per_block * n_blocks * sizeof(double), cudaMemcpyDeviceToHost) );
double big_avg = 0;
for (int i = 0; i < threads_per_block * n_blocks; i++){
big_avg = rollingAverage(big_avg, average[i], i);
}
printf("Average: %f\n", big_avg);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("TIME: %f\n", time_spent);
cudaFree(d_draws);
return 0;
}
|
d8c16b9561a5a4fd8e8157f1e19707f7ca9c606f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <hip/hip_runtime.h>
int cpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
clock_t start = clock();
for(int i = 0; i < a_n; i++)
{
for(int j = 0; j < b_m; j++)
{
*(matrix_c + i * b_m + j) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + i * b_m + j) += (* (matrix_a + i * a_m + k)) * (* (matrix_b + k * b_m + j));
}
}
}
return (int)(clock() - start)/ (CLOCKS_PER_SEC / 1000);
}
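// GPU kernel below: grid-stride loop over the flattened result matrix
// (a_n x b_m); each thread computes whole elements C[ROW][COL] as the dot
// product of row ROW of A with column COL of B.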
__global__ void matrixMul(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int temp = id;
while(temp < a_n * b_m)
{
int ROW = temp / b_m;
int COL = temp % b_m;
*(matrix_c + ROW * b_m + COL) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + ROW * b_m + COL) += (* (matrix_a + ROW * a_m + k)) * (* (matrix_b + k * b_m + COL));
}
temp +=blockDim.x*gridDim.x;
}
}
int gpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int * dev_matrix_a, * dev_matrix_b, * dev_matrix_c;
hipEvent_t start, stop;
float elapsed_time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMalloc(&dev_matrix_a, sizeof(int) * a_n * a_m);
hipMalloc(&dev_matrix_b, sizeof(int) * b_n * b_m);
hipMalloc(&dev_matrix_c, sizeof(int) * a_n * b_m);
hipMemcpy(dev_matrix_a, matrix_a, sizeof(int) * a_n * a_m, hipMemcpyHostToDevice);
hipMemcpy(dev_matrix_b, matrix_b, sizeof(int) * b_n * b_m, hipMemcpyHostToDevice);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( matrixMul), dim3(32),dim3(128), 0, 0, dev_matrix_a, dev_matrix_b, dev_matrix_c, a_n, b_n, a_m, b_m);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(matrix_c, dev_matrix_c, sizeof(int) * a_n * b_m, hipMemcpyDeviceToHost);
hipFree(dev_matrix_a);
hipFree(dev_matrix_b);
hipFree(dev_matrix_c);
return floor(elapsed_time);
}
void allocate_matrix(int **matrix, int n, int m)
{
*matrix = (int*) malloc(sizeof(int) * n * m);
}
void generate_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
*(matrix + i*m +j) = rand() % m;
}
}
}
void output_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
printf("\n");
for(int j = 0; j < m; j++){
printf("%d ",*(matrix + i*m +j));
}
}
}
void compute(int a_row, int b_row, int a_col, int b_col, bool show_matrix_flag = false)
{
int * matrix_a, * matrix_b, * matrix_c_cpu, * matrix_c_gpu, time_cpu, time_gpu;
allocate_matrix(&matrix_a, a_row, a_col);
allocate_matrix(&matrix_b, b_row, b_col);
generate_matrix(matrix_a, a_row, a_col);
generate_matrix(matrix_b, b_row, b_col);
if(show_matrix_flag){
printf("\n\nMatrix A:");
output_matrix(matrix_a, a_row, a_col);
printf("\n\nMatrix B:");
output_matrix(matrix_b, b_row, b_col);
}
allocate_matrix(&matrix_c_cpu, a_row, b_col);
//time_cpu = 0;
time_cpu = cpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_cpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\nMatrix C(CPU):");
output_matrix(matrix_c_cpu, a_row, b_col);
}
free(matrix_c_cpu);
allocate_matrix(&matrix_c_gpu, a_row, b_col);
time_gpu = gpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_gpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\nMatrix C(GPU):");
output_matrix(matrix_c_gpu, a_row, b_col);
}
free(matrix_c_gpu);
free(matrix_a);
free(matrix_b);
if(!show_matrix_flag){
printf("\n\nExecution time (ms) A[%d,%d] * B[%d, %d]:", a_row, a_col, b_row, b_col);
printf("CPU - %d, GPU - %d\n", time_cpu, time_gpu);
}
}
int main() {
srand(time(NULL));
compute(5,6,6,2,true);
compute(32,32,32,32);
compute(64,64,64,64);
compute(128,128,128,128);
compute(256,256,256,256);
compute(512,512,512,512);
compute(1024,1024,1024,1024);
//compute(2048,2048,2048,2048);
//compute(10000,10000,10000,10000);
return 0;
} | d8c16b9561a5a4fd8e8157f1e19707f7ca9c606f.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <cuda_runtime.h>
int cpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
clock_t start = clock();
for(int i = 0; i < a_n; i++)
{
for(int j = 0; j < b_m; j++)
{
*(matrix_c + i * b_m + j) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + i * b_m + j) += (* (matrix_a + i * a_m + k)) * (* (matrix_b + k * b_m + j));
}
}
}
return (int)(clock() - start)/ (CLOCKS_PER_SEC / 1000);
}
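// GPU kernel below: grid-stride loop over the flattened result matrix
// (a_n x b_m); each thread computes whole elements C[ROW][COL] as the dot
// product of row ROW of A with column COL of B.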
__global__ void matrixMul(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int temp = id;
while(temp < a_n * b_m)
{
int ROW = temp / b_m;
int COL = temp % b_m;
*(matrix_c + ROW * b_m + COL) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + ROW * b_m + COL) += (* (matrix_a + ROW * a_m + k)) * (* (matrix_b + k * b_m + COL));
}
temp +=blockDim.x*gridDim.x;
}
}
int gpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int * dev_matrix_a, * dev_matrix_b, * dev_matrix_c;
cudaEvent_t start, stop;
float elapsed_time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc(&dev_matrix_a, sizeof(int) * a_n * a_m);
cudaMalloc(&dev_matrix_b, sizeof(int) * b_n * b_m);
cudaMalloc(&dev_matrix_c, sizeof(int) * a_n * b_m);
cudaMemcpy(dev_matrix_a, matrix_a, sizeof(int) * a_n * a_m, cudaMemcpyHostToDevice);
cudaMemcpy(dev_matrix_b, matrix_b, sizeof(int) * b_n * b_m, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
matrixMul<<<32,128>>>(dev_matrix_a, dev_matrix_b, dev_matrix_c, a_n, b_n, a_m, b_m);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(matrix_c, dev_matrix_c, sizeof(int) * a_n * b_m, cudaMemcpyDeviceToHost);
cudaFree(dev_matrix_a);
cudaFree(dev_matrix_b);
cudaFree(dev_matrix_c);
return floor(elapsed_time);
}
void allocate_matrix(int **matrix, int n, int m)
{
*matrix = (int*) malloc(sizeof(int) * n * m);
}
void generate_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
*(matrix + i*m +j) = rand() % m;
}
}
}
void output_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
printf("\n");
for(int j = 0; j < m; j++){
printf("%d ",*(matrix + i*m +j));
}
}
}
void compute(int a_row, int b_row, int a_col, int b_col, bool show_matrix_flag = false)
{
int * matrix_a, * matrix_b, * matrix_c_cpu, * matrix_c_gpu, time_cpu, time_gpu;
allocate_matrix(&matrix_a, a_row, a_col);
allocate_matrix(&matrix_b, b_row, b_col);
generate_matrix(matrix_a, a_row, a_col);
generate_matrix(matrix_b, b_row, b_col);
if(show_matrix_flag){
printf("\n\nMatrix A:");
output_matrix(matrix_a, a_row, a_col);
printf("\n\nMatrix B:");
output_matrix(matrix_b, b_row, b_col);
}
allocate_matrix(&matrix_c_cpu, a_row, b_col);
//time_cpu = 0;
time_cpu = cpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_cpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\nMatrix C(CPU):");
output_matrix(matrix_c_cpu, a_row, b_col);
}
free(matrix_c_cpu);
allocate_matrix(&matrix_c_gpu, a_row, b_col);
time_gpu = gpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_gpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\nMatrix C(GPU):");
output_matrix(matrix_c_gpu, a_row, b_col);
}
free(matrix_c_gpu);
free(matrix_a);
free(matrix_b);
if(!show_matrix_flag){
printf("\n\nExecution time (ms) A[%d,%d] * B[%d, %d]:", a_row, a_col, b_row, b_col);
printf("CPU - %d, GPU - %d\n", time_cpu, time_gpu);
}
}
int main() {
srand(time(NULL));
compute(5,6,6,2,true);
compute(32,32,32,32);
compute(64,64,64,64);
compute(128,128,128,128);
compute(256,256,256,256);
compute(512,512,512,512);
compute(1024,1024,1024,1024);
//compute(2048,2048,2048,2048);
//compute(10000,10000,10000,10000);
return 0;
} |
61bb7e56b69d0811753aa9ad543de0dde5cf9157.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/laserscanbuffer.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace geometry {
namespace {
std::pair<float, float> TangentMinMax(float min_angle, float max_angle) {
float min_angle_tan = tan(min_angle);
float max_angle_tan = tan(max_angle);
// Correct sign of tan around singularity points
if (min_angle_tan < 0.0) min_angle_tan = -min_angle_tan;
if (max_angle_tan > 0.0) max_angle_tan = -max_angle_tan;
return std::make_pair(min_angle_tan, max_angle_tan);
}
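// Shadow test between two returns r1 and r2 that are `included_angle` apart:
// the tangent of the angle between the first ray and the segment joining the
// two hit points is compared against the configured min/max tangents
// (essentially the criterion used by ScanShadowsFilter in ROS laser_filters).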
__device__ bool IsShadow(float r1,
float r2,
float included_angle,
float min_angle_tan,
float max_angle_tan) {
const float perpendicular_y = r2 * sin(included_angle);
const float perpendicular_x = r1 - r2 * cos(included_angle);
const float perpendicular_tan = fabs(perpendicular_y) / perpendicular_x;
if (perpendicular_tan > 0) {
if (perpendicular_tan < min_angle_tan) return true;
} else {
if (perpendicular_tan > max_angle_tan) return true;
}
return false;
}
struct apply_scan_shadow_filter_functor {
apply_scan_shadow_filter_functor(const float* ranges,
float min_angle_tan,
float max_angle_tan,
float angle_increment,
int num_steps,
int window,
int neighbors,
bool remove_shadow_start_point,
float* out)
: ranges_(ranges),
min_angle_tan_(min_angle_tan),
max_angle_tan_(max_angle_tan),
angle_increment_(angle_increment),
num_steps_(num_steps),
window_(window),
neighbors_(neighbors),
remove_shadow_start_point_(remove_shadow_start_point),
out_(out){};
const float* ranges_;
const float min_angle_tan_;
const float max_angle_tan_;
const float angle_increment_;
const int num_steps_;
const int window_;
const int neighbors_;
const bool remove_shadow_start_point_;
float* out_;
__device__ void operator()(size_t idx) {
int n = idx / num_steps_;
int i = idx % num_steps_;
for (int y = -window_; y < window_ + 1; y++) {
int j = i + y;
if (j < 0 || j >= num_steps_ || i == j) continue;
if (IsShadow(ranges_[n * num_steps_ + i],
ranges_[n * num_steps_ + j], y * angle_increment_,
min_angle_tan_, max_angle_tan_)) {
for (int index = max(i - neighbors_, 0);
index <= min(i + neighbors_, num_steps_ - 1); index++) {
if (ranges_[i] < ranges_[index]) {
out_[n * num_steps_ + index] =
std::numeric_limits<float>::quiet_NaN();
}
}
if (remove_shadow_start_point_) {
out_[n * num_steps_ + i] =
std::numeric_limits<float>::quiet_NaN();
}
}
}
}
};
} // namespace
LaserScanBuffer::LaserScanBuffer(int num_steps,
int num_max_scans,
float min_angle,
float max_angle)
: GeometryBase3D(Geometry::GeometryType::LaserScanBuffer),
num_steps_(num_steps),
num_max_scans_(num_max_scans),
min_angle_(min_angle),
max_angle_(max_angle) {
ranges_.reserve(num_steps_ * num_max_scans_);
intensities_.reserve(num_steps_ * num_max_scans_);
origins_.reserve(num_max_scans_);
}
LaserScanBuffer::~LaserScanBuffer(){};
LaserScanBuffer::LaserScanBuffer(const LaserScanBuffer& other)
: GeometryBase3D(Geometry::GeometryType::LaserScanBuffer),
ranges_(other.ranges_),
intensities_(other.intensities_),
top_(other.top_),
bottom_(other.bottom_),
num_steps_(other.num_steps_),
num_max_scans_(other.num_max_scans_),
min_angle_(other.min_angle_),
max_angle_(other.max_angle_),
origins_(other.origins_) {}
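// top_ and bottom_ are ever-increasing scan counters; the buffered scans occupy
// slots [top_ % num_max_scans_, bottom_ % num_max_scans_) of the circular
// ranges_/intensities_ storage, so reads below may have to wrap around the end.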
thrust::host_vector<float> LaserScanBuffer::GetRanges() const {
thrust::host_vector<float> ranges;
if (top_ == bottom_) {
return ranges;
}
int start = top_ % num_max_scans_;
int end = bottom_ % num_max_scans_;
if (start < end) {
int n = end - start;
ranges.resize(n * num_steps_);
thrust::copy_n(ranges_.begin() + start * num_steps_, n * num_steps_,
ranges.begin());
return ranges;
} else {
ranges.resize(num_max_scans_ * num_steps_);
int offset = (num_max_scans_ - start) * num_steps_;
thrust::copy_n(ranges_.begin() + start * num_steps_, offset,
ranges.begin());
thrust::copy_n(ranges_.begin(), end * num_steps_,
ranges.begin() + offset);
return ranges;
}
}
thrust::host_vector<float> LaserScanBuffer::GetIntensities() const {
thrust::host_vector<float> intensities;
if (top_ == bottom_) {
return intensities;
}
int start = top_ % num_max_scans_;
int end = bottom_ % num_max_scans_;
if (start < end) {
int n = end - start;
intensities.resize(n * num_steps_);
thrust::copy_n(intensities_.begin() + start * num_steps_,
n * num_steps_, intensities.begin());
return intensities;
} else {
intensities.resize(num_max_scans_ * num_steps_);
int offset = (num_max_scans_ - start) * num_steps_;
thrust::copy_n(intensities_.begin() + start * num_steps_, offset,
intensities.begin());
thrust::copy_n(intensities_.begin(), end * num_steps_,
intensities.begin() + offset);
return intensities;
}
}
LaserScanBuffer& LaserScanBuffer::Clear() {
top_ = 0;
bottom_ = 0;
ranges_.clear();
intensities_.clear();
origins_.clear();
return *this;
}
bool LaserScanBuffer::IsEmpty() const { return bottom_ == top_; }
Eigen::Vector3f LaserScanBuffer::GetMinBound() const {
utility::LogError("LaserScanBuffer::GetMinBound is not supported");
return Eigen::Vector3f::Zero();
}
Eigen::Vector3f LaserScanBuffer::GetMaxBound() const {
utility::LogError("LaserScanBuffer::GetMaxBound is not supported");
return Eigen::Vector3f::Zero();
}
Eigen::Vector3f LaserScanBuffer::GetCenter() const {
utility::LogError("LaserScanBuffer::GetCenter is not supported");
return Eigen::Vector3f::Zero();
}
AxisAlignedBoundingBox<3> LaserScanBuffer::GetAxisAlignedBoundingBox() const {
utility::LogError(
"LaserScanBuffer::GetAxisAlignedBoundingBox is not supported");
return AxisAlignedBoundingBox<3>();
}
LaserScanBuffer& LaserScanBuffer::Transform(
const Eigen::Matrix4f& transformation) {
thrust::for_each(origins_.begin(), origins_.end(),
[transformation] __device__(Eigen::Matrix4f_u & trans) {
trans = trans * transformation;
});
return *this;
}
LaserScanBuffer& LaserScanBuffer::Translate(const Eigen::Vector3f& translation,
bool relative) {
thrust::for_each(origins_.begin(), origins_.end(),
[translation] __device__(Eigen::Matrix4f_u & trans) {
trans.block<3, 1>(0, 3) =
trans.block<3, 1>(0, 3) + translation;
});
return *this;
}
LaserScanBuffer& LaserScanBuffer::Scale(const float scale, bool center) {
thrust::for_each(ranges_.begin(), ranges_.end(),
[scale] __device__(float& r) { r *= scale; });
return *this;
}
LaserScanBuffer& LaserScanBuffer::Rotate(const Eigen::Matrix3f& R,
bool center) {
thrust::for_each(origins_.begin(), origins_.end(),
[R] __device__(Eigen::Matrix4f_u & trans) {
trans.block<3, 3>(0, 0) = trans.block<3, 3>(0, 0) * R;
});
return *this;
}
template <typename ContainerType>
LaserScanBuffer& LaserScanBuffer::AddRanges(
const ContainerType& ranges,
const Eigen::Matrix4f& transformation,
const ContainerType& intensities) {
if (ranges.size() != num_steps_) {
utility::LogError("[AddRanges] Invalid size of input ranges.");
return *this;
}
if (HasIntensities() && ranges.size() != intensities.size()) {
utility::LogError("[AddRanges] Invalid size of intensities.");
return *this;
}
bool add_intensities =
!intensities.empty() && ranges.size() == intensities.size();
int end = bottom_ % num_max_scans_;
if (bottom_ + 1 <= num_max_scans_) {
ranges_.insert(ranges_.end(), ranges.begin(), ranges.end());
if (add_intensities)
intensities_.insert(intensities_.end(), intensities.begin(),
intensities.end());
origins_.push_back(transformation);
bottom_++;
} else {
thrust::copy_n(ranges.begin(), num_steps_,
ranges_.begin() + end * num_steps_);
if (add_intensities)
thrust::copy_n(intensities.begin(), num_steps_,
intensities_.begin() + end * num_steps_);
origins_[end] = transformation;
if (IsFull()) top_++;
bottom_++;
}
return *this;
}
template LaserScanBuffer& LaserScanBuffer::AddRanges(
const utility::device_vector<float>& range,
const Eigen::Matrix4f& transformation,
const utility::device_vector<float>& intensities);
template LaserScanBuffer& LaserScanBuffer::AddRanges(
const utility::pinned_host_vector<float>& ranges,
const Eigen::Matrix4f& transformation,
const utility::pinned_host_vector<float>& intensities);
class ContainerLikePtr {
public:
ContainerLikePtr(const float* data, size_t size) : data_(data), size_(size) {}
size_t size() const { return size_; }
const float* begin() const { return data_; }
const float* end() const { return data_ + size_; }
bool empty() const { return size_ == 0; }
const float* data_;
size_t size_;
};
LaserScanBuffer &LaserScanBuffer::AddRanges(
const float *ranges,
const Eigen::Matrix4f &transformation,
const float *intensities) {
return AddRanges(ContainerLikePtr(ranges, num_steps_), transformation,
ContainerLikePtr(intensities, num_steps_));
}
LaserScanBuffer &LaserScanBuffer::Merge(const LaserScanBuffer &other) {
if (other.IsEmpty()) {
utility::LogError("[Merge] Input buffer is empty.");
return *this;
}
if (other.num_steps_ != num_steps_) {
utility::LogError("[Merge] Input buffer has different num_steps.");
return *this;
}
if (other.HasIntensities() != HasIntensities()) {
utility::LogError(
"[Merge] Input buffer has different intensities.");
return *this;
}
if (other.min_angle_ != min_angle_ || other.max_angle_ != max_angle_) {
utility::LogError(
"[Merge] Input buffer has different angle range.");
return *this;
}
if (other.bottom_ - other.top_ + bottom_ - top_ > num_max_scans_) {
utility::LogError("[Merge] Buffer is full.");
return *this;
}
ranges_.insert(ranges_.end(), other.ranges_.begin(), other.ranges_.end());
if (HasIntensities()) {
intensities_.insert(intensities_.end(), other.intensities_.begin(),
other.intensities_.end());
}
origins_.insert(origins_.end(), other.origins_.begin(),
other.origins_.end());
bottom_ += other.bottom_ - other.top_;
return *this;
}
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::PopOneScan() {
if (IsEmpty()) {
utility::LogError("[PopRange] Buffer is empty.");
return nullptr;
}
const int start = top_ % num_max_scans_;
auto out = std::make_shared<LaserScanBuffer>(num_steps_, num_max_scans_,
min_angle_, max_angle_);
out->ranges_.resize(num_steps_);
thrust::copy_n(ranges_.begin() + start * num_steps_, num_steps_,
out->ranges_.begin());
if (HasIntensities()) {
out->intensities_.resize(num_steps_);
thrust::copy_n(intensities_.begin() + start * num_steps_, num_steps_,
out->intensities_.begin());
}
out->origins_.push_back(origins_[start]);
out->top_ = 0;
out->bottom_ = 1;
top_++;
return out;
}
std::pair<std::unique_ptr<utility::pinned_host_vector<float>>, std::unique_ptr<utility::pinned_host_vector<float>>> LaserScanBuffer::PopHostOneScan() {
if (IsEmpty()) {
utility::LogError("[PopRange] Buffer is empty.");
return std::make_pair(std::make_unique<utility::pinned_host_vector<float>>(), std::make_unique<utility::pinned_host_vector<float>>());
}
const int start = top_ % num_max_scans_;
auto out = std::make_unique<utility::pinned_host_vector<float>>(num_steps_);
cudaSafeCall(hipMemcpy(thrust::raw_pointer_cast(out->data()),
thrust::raw_pointer_cast(ranges_.data()) + start * num_steps_,
num_steps_ * sizeof(float), hipMemcpyDeviceToHost));
auto out_intensities = std::make_unique<utility::pinned_host_vector<float>>();
if (HasIntensities()) {
out_intensities->resize(num_steps_);
cudaSafeCall(hipMemcpy(thrust::raw_pointer_cast(out_intensities->data()),
thrust::raw_pointer_cast(intensities_.data()) + start * num_steps_,
num_steps_ * sizeof(float), hipMemcpyDeviceToHost));
}
top_++;
cudaSafeCall(hipDeviceSynchronize());
return std::make_pair(std::move(out), std::move(out_intensities));
}
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::RangeFilter(
float min_range, float max_range) const {
auto out = std::make_shared<LaserScanBuffer>(num_steps_, num_max_scans_,
min_angle_, max_angle_);
if (max_range <= min_range) {
utility::LogError(
"[RangeFilter] Invalid parameter with min_range greater than "
"max_range.");
}
out->ranges_.resize(ranges_.size());
out->top_ = top_;
out->bottom_ = bottom_;
thrust::transform(
ranges_.begin(), ranges_.end(), out->ranges_.begin(),
[min_range, max_range] __device__(float r) {
return (r < min_range || r > max_range)
? std::numeric_limits<float>::quiet_NaN()
: r;
});
return out;
}
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::ScanShadowsFilter(
float min_angle,
float max_angle,
int window,
int neighbors,
bool remove_shadow_start_point) const {
auto out = std::make_shared<LaserScanBuffer>(*this);
auto minmax_tan = TangentMinMax(min_angle, max_angle);
apply_scan_shadow_filter_functor func(
thrust::raw_pointer_cast(ranges_.data()), minmax_tan.first,
minmax_tan.second, GetAngleIncrement(), num_steps_, window,
neighbors, remove_shadow_start_point,
thrust::raw_pointer_cast(out->ranges_.data()));
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(ranges_.size()), func);
return out;
}
} // namespace geometry
} // namespace cupoch | 61bb7e56b69d0811753aa9ad543de0dde5cf9157.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/laserscanbuffer.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace geometry {
namespace {
std::pair<float, float> TangentMinMax(float min_angle, float max_angle) {
float min_angle_tan = tan(min_angle);
float max_angle_tan = tan(max_angle);
// Correct sign of tan around singularity points
if (min_angle_tan < 0.0) min_angle_tan = -min_angle_tan;
if (max_angle_tan > 0.0) max_angle_tan = -max_angle_tan;
return std::make_pair(min_angle_tan, max_angle_tan);
}
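// Shadow test between two returns r1 and r2 that are `included_angle` apart:
// the tangent of the angle between the first ray and the segment joining the
// two hit points is compared against the configured min/max tangents
// (essentially the criterion used by ScanShadowsFilter in ROS laser_filters).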
__device__ bool IsShadow(float r1,
float r2,
float included_angle,
float min_angle_tan,
float max_angle_tan) {
const float perpendicular_y = r2 * sin(included_angle);
const float perpendicular_x = r1 - r2 * cos(included_angle);
const float perpendicular_tan = fabs(perpendicular_y) / perpendicular_x;
if (perpendicular_tan > 0) {
if (perpendicular_tan < min_angle_tan) return true;
} else {
if (perpendicular_tan > max_angle_tan) return true;
}
return false;
}
struct apply_scan_shadow_filter_functor {
apply_scan_shadow_filter_functor(const float* ranges,
float min_angle_tan,
float max_angle_tan,
float angle_increment,
int num_steps,
int window,
int neighbors,
bool remove_shadow_start_point,
float* out)
: ranges_(ranges),
min_angle_tan_(min_angle_tan),
max_angle_tan_(max_angle_tan),
angle_increment_(angle_increment),
num_steps_(num_steps),
window_(window),
neighbors_(neighbors),
remove_shadow_start_point_(remove_shadow_start_point),
out_(out){};
const float* ranges_;
const float min_angle_tan_;
const float max_angle_tan_;
const float angle_increment_;
const int num_steps_;
const int window_;
const int neighbors_;
const bool remove_shadow_start_point_;
float* out_;
__device__ void operator()(size_t idx) {
int n = idx / num_steps_;
int i = idx % num_steps_;
for (int y = -window_; y < window_ + 1; y++) {
int j = i + y;
if (j < 0 || j >= num_steps_ || i == j) continue;
if (IsShadow(ranges_[n * num_steps_ + i],
ranges_[n * num_steps_ + j], y * angle_increment_,
min_angle_tan_, max_angle_tan_)) {
for (int index = max(i - neighbors_, 0);
index <= min(i + neighbors_, num_steps_ - 1); index++) {
if (ranges_[i] < ranges_[index]) {
out_[n * num_steps_ + index] =
std::numeric_limits<float>::quiet_NaN();
}
}
if (remove_shadow_start_point_) {
out_[n * num_steps_ + i] =
std::numeric_limits<float>::quiet_NaN();
}
}
}
}
};
} // namespace
LaserScanBuffer::LaserScanBuffer(int num_steps,
int num_max_scans,
float min_angle,
float max_angle)
: GeometryBase3D(Geometry::GeometryType::LaserScanBuffer),
num_steps_(num_steps),
num_max_scans_(num_max_scans),
min_angle_(min_angle),
max_angle_(max_angle) {
ranges_.reserve(num_steps_ * num_max_scans_);
intensities_.reserve(num_steps_ * num_max_scans_);
origins_.reserve(num_max_scans_);
}
LaserScanBuffer::~LaserScanBuffer(){};
LaserScanBuffer::LaserScanBuffer(const LaserScanBuffer& other)
: GeometryBase3D(Geometry::GeometryType::LaserScanBuffer),
ranges_(other.ranges_),
intensities_(other.intensities_),
top_(other.top_),
bottom_(other.bottom_),
num_steps_(other.num_steps_),
num_max_scans_(other.num_max_scans_),
min_angle_(other.min_angle_),
max_angle_(other.max_angle_),
origins_(other.origins_) {}
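// top_ and bottom_ are ever-increasing scan counters; the buffered scans occupy
// slots [top_ % num_max_scans_, bottom_ % num_max_scans_) of the circular
// ranges_/intensities_ storage, so reads below may have to wrap around the end.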
thrust::host_vector<float> LaserScanBuffer::GetRanges() const {
thrust::host_vector<float> ranges;
if (top_ == bottom_) {
return ranges;
}
int start = top_ % num_max_scans_;
int end = bottom_ % num_max_scans_;
if (start < end) {
int n = end - start;
ranges.resize(n * num_steps_);
thrust::copy_n(ranges_.begin() + start * num_steps_, n * num_steps_,
ranges.begin());
return ranges;
} else {
ranges.resize(num_max_scans_ * num_steps_);
int offset = (num_max_scans_ - start) * num_steps_;
thrust::copy_n(ranges_.begin() + start * num_steps_, offset,
ranges.begin());
thrust::copy_n(ranges_.begin(), end * num_steps_,
ranges.begin() + offset);
return ranges;
}
}
thrust::host_vector<float> LaserScanBuffer::GetIntensities() const {
thrust::host_vector<float> intensities;
if (top_ == bottom_) {
return intensities;
}
int start = top_ % num_max_scans_;
int end = bottom_ % num_max_scans_;
if (start < end) {
int n = end - start;
intensities.resize(n * num_steps_);
thrust::copy_n(intensities_.begin() + start * num_steps_,
n * num_steps_, intensities.begin());
return intensities;
} else {
intensities.resize(num_max_scans_ * num_steps_);
int offset = (num_max_scans_ - start) * num_steps_;
thrust::copy_n(intensities_.begin() + start * num_steps_, offset,
intensities.begin());
thrust::copy_n(intensities_.begin(), end * num_steps_,
intensities.begin() + offset);
return intensities;
}
}
LaserScanBuffer& LaserScanBuffer::Clear() {
top_ = 0;
bottom_ = 0;
ranges_.clear();
intensities_.clear();
origins_.clear();
return *this;
}
bool LaserScanBuffer::IsEmpty() const { return bottom_ == top_; }
Eigen::Vector3f LaserScanBuffer::GetMinBound() const {
utility::LogError("LaserScanBuffer::GetMinBound is not supported");
return Eigen::Vector3f::Zero();
}
Eigen::Vector3f LaserScanBuffer::GetMaxBound() const {
utility::LogError("LaserScanBuffer::GetMaxBound is not supported");
return Eigen::Vector3f::Zero();
}
Eigen::Vector3f LaserScanBuffer::GetCenter() const {
utility::LogError("LaserScanBuffer::GetCenter is not supported");
return Eigen::Vector3f::Zero();
}
AxisAlignedBoundingBox<3> LaserScanBuffer::GetAxisAlignedBoundingBox() const {
utility::LogError(
"LaserScanBuffer::GetAxisAlignedBoundingBox is not supported");
return AxisAlignedBoundingBox<3>();
}
LaserScanBuffer& LaserScanBuffer::Transform(
const Eigen::Matrix4f& transformation) {
thrust::for_each(origins_.begin(), origins_.end(),
[transformation] __device__(Eigen::Matrix4f_u & trans) {
trans = trans * transformation;
});
return *this;
}
LaserScanBuffer& LaserScanBuffer::Translate(const Eigen::Vector3f& translation,
bool relative) {
thrust::for_each(origins_.begin(), origins_.end(),
[translation] __device__(Eigen::Matrix4f_u & trans) {
trans.block<3, 1>(0, 3) =
trans.block<3, 1>(0, 3) + translation;
});
return *this;
}
LaserScanBuffer& LaserScanBuffer::Scale(const float scale, bool center) {
thrust::for_each(ranges_.begin(), ranges_.end(),
[scale] __device__(float& r) { r *= scale; });
return *this;
}
LaserScanBuffer& LaserScanBuffer::Rotate(const Eigen::Matrix3f& R,
bool center) {
thrust::for_each(origins_.begin(), origins_.end(),
[R] __device__(Eigen::Matrix4f_u & trans) {
trans.block<3, 3>(0, 0) = trans.block<3, 3>(0, 0) * R;
});
return *this;
}
template <typename ContainerType>
LaserScanBuffer& LaserScanBuffer::AddRanges(
const ContainerType& ranges,
const Eigen::Matrix4f& transformation,
const ContainerType& intensities) {
if (ranges.size() != num_steps_) {
utility::LogError("[AddRanges] Invalid size of input ranges.");
return *this;
}
if (HasIntensities() && ranges.size() != intensities.size()) {
utility::LogError("[AddRanges] Invalid size of intensities.");
return *this;
}
bool add_intensities =
!intensities.empty() && ranges.size() == intensities.size();
int end = bottom_ % num_max_scans_;
if (bottom_ + 1 <= num_max_scans_) {
ranges_.insert(ranges_.end(), ranges.begin(), ranges.end());
if (add_intensities)
intensities_.insert(intensities_.end(), intensities.begin(),
intensities.end());
origins_.push_back(transformation);
bottom_++;
} else {
thrust::copy_n(ranges.begin(), num_steps_,
ranges_.begin() + end * num_steps_);
if (add_intensities)
thrust::copy_n(intensities.begin(), num_steps_,
intensities_.begin() + end * num_steps_);
origins_[end] = transformation;
if (IsFull()) top_++;
bottom_++;
}
return *this;
}
template LaserScanBuffer& LaserScanBuffer::AddRanges(
const utility::device_vector<float>& range,
const Eigen::Matrix4f& transformation,
const utility::device_vector<float>& intensities);
template LaserScanBuffer& LaserScanBuffer::AddRanges(
const utility::pinned_host_vector<float>& ranges,
const Eigen::Matrix4f& transformation,
const utility::pinned_host_vector<float>& intensities);
class ContainerLikePtr {
public:
ContainerLikePtr(const float* data, size_t size) : data_(data), size_(size) {}
size_t size() const { return size_; }
const float* begin() const { return data_; }
const float* end() const { return data_ + size_; }
bool empty() const { return size_ == 0; }
const float* data_;
size_t size_;
};
LaserScanBuffer &LaserScanBuffer::AddRanges(
const float *ranges,
const Eigen::Matrix4f &transformation,
const float *intensities) {
return AddRanges(ContainerLikePtr(ranges, num_steps_), transformation,
ContainerLikePtr(intensities, num_steps_));
}
LaserScanBuffer &LaserScanBuffer::Merge(const LaserScanBuffer &other) {
if (other.IsEmpty()) {
utility::LogError("[Merge] Input buffer is empty.");
return *this;
}
if (other.num_steps_ != num_steps_) {
utility::LogError("[Merge] Input buffer has different num_steps.");
return *this;
}
if (other.HasIntensities() != HasIntensities()) {
utility::LogError(
"[Merge] Input buffer has different intensities.");
return *this;
}
if (other.min_angle_ != min_angle_ || other.max_angle_ != max_angle_) {
utility::LogError(
"[Merge] Input buffer has different angle range.");
return *this;
}
if (other.bottom_ - other.top_ + bottom_ - top_ > num_max_scans_) {
utility::LogError("[Merge] Buffer is full.");
return *this;
}
ranges_.insert(ranges_.end(), other.ranges_.begin(), other.ranges_.end());
if (HasIntensities()) {
intensities_.insert(intensities_.end(), other.intensities_.begin(),
other.intensities_.end());
}
origins_.insert(origins_.end(), other.origins_.begin(),
other.origins_.end());
bottom_ += other.bottom_ - other.top_;
return *this;
}
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::PopOneScan() {
if (IsEmpty()) {
utility::LogError("[PopRange] Buffer is empty.");
return nullptr;
}
const int start = top_ % num_max_scans_;
auto out = std::make_shared<LaserScanBuffer>(num_steps_, num_max_scans_,
min_angle_, max_angle_);
out->ranges_.resize(num_steps_);
thrust::copy_n(ranges_.begin() + start * num_steps_, num_steps_,
out->ranges_.begin());
if (HasIntensities()) {
out->intensities_.resize(num_steps_);
thrust::copy_n(intensities_.begin() + start * num_steps_, num_steps_,
out->intensities_.begin());
}
out->origins_.push_back(origins_[start]);
out->top_ = 0;
out->bottom_ = 1;
top_++;
return out;
}
std::pair<std::unique_ptr<utility::pinned_host_vector<float>>, std::unique_ptr<utility::pinned_host_vector<float>>> LaserScanBuffer::PopHostOneScan() {
if (IsEmpty()) {
utility::LogError("[PopRange] Buffer is empty.");
return std::make_pair(std::make_unique<utility::pinned_host_vector<float>>(), std::make_unique<utility::pinned_host_vector<float>>());
}
const int start = top_ % num_max_scans_;
auto out = std::make_unique<utility::pinned_host_vector<float>>(num_steps_);
cudaSafeCall(cudaMemcpy(thrust::raw_pointer_cast(out->data()),
thrust::raw_pointer_cast(ranges_.data()) + start * num_steps_,
num_steps_ * sizeof(float), cudaMemcpyDeviceToHost));
auto out_intensities = std::make_unique<utility::pinned_host_vector<float>>();
if (HasIntensities()) {
out_intensities->resize(num_steps_);
cudaSafeCall(cudaMemcpy(thrust::raw_pointer_cast(out_intensities->data()),
thrust::raw_pointer_cast(intensities_.data()) + start * num_steps_,
num_steps_ * sizeof(float), cudaMemcpyDeviceToHost));
}
top_++;
cudaSafeCall(cudaDeviceSynchronize());
return std::make_pair(std::move(out), std::move(out_intensities));
}
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::RangeFilter(
float min_range, float max_range) const {
auto out = std::make_shared<LaserScanBuffer>(num_steps_, num_max_scans_,
min_angle_, max_angle_);
if (max_range <= min_range) {
utility::LogError(
"[RangeFilter] Invalid parameter with min_range greater than "
"max_range.");
}
out->ranges_.resize(ranges_.size());
out->top_ = top_;
out->bottom_ = bottom_;
thrust::transform(
ranges_.begin(), ranges_.end(), out->ranges_.begin(),
[min_range, max_range] __device__(float r) {
return (r < min_range || r > max_range)
? std::numeric_limits<float>::quiet_NaN()
: r;
});
return out;
}
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::ScanShadowsFilter(
float min_angle,
float max_angle,
int window,
int neighbors,
bool remove_shadow_start_point) const {
auto out = std::make_shared<LaserScanBuffer>(*this);
auto minmax_tan = TangentMinMax(min_angle, max_angle);
apply_scan_shadow_filter_functor func(
thrust::raw_pointer_cast(ranges_.data()), minmax_tan.first,
minmax_tan.second, GetAngleIncrement(), num_steps_, window,
neighbors, remove_shadow_start_point,
thrust::raw_pointer_cast(out->ranges_.data()));
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(ranges_.size()), func);
return out;
}
} // namespace geometry
} // namespace cupoch |
0cabd85d9ddc5876d0ed0b01372948c825475836.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__
inline void res_calc_gpu(const double *x1, const double *x2, const double *q1, const double *q2,
const double *adt1, const double *adt2, double *res1, double *res2) {
double dx,dy,mu, ri, p1,vol1, p2,vol2, f;
dx = x1[0] - x2[0];
dy = x1[1] - x2[1];
ri = 1.0f/q1[0];
p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2]));
vol1 = ri*(q1[1]*dy - q1[2]*dx);
ri = 1.0f/q2[0];
p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2]));
vol2 = ri*(q2[1]*dy - q2[2]*dx);
mu = 0.5f*((*adt1)+(*adt2))*eps;
f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]);
res1[0] += f;
res2[0] -= f;
f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]);
res1[1] += f;
res2[1] -= f;
f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]);
res1[2] += f;
res2[2] -= f;
f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]);
res1[3] += f;
res2[3] -= f;
}
// CUDA kernel function
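// Each thread handles one edge of its block (located via blkmap/offset/nelems),
// gathers x, q and adt through the two indirection maps, and applies the res
// increments colour by colour (thrcol) so that edges sharing a cell never
// update it at the same time.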
__global__ void op_cuda_res_calc(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[4];
double arg7_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
int map3idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg6_l[d] = ZERO_double;
}
for ( int d=0; d<4; d++ ){
arg7_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
map3idx = opDat2Map[n + offset_b + set_size * 1];
//user-supplied kernel call
res_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg1+map3idx*4,
ind_arg2+map2idx*1,
ind_arg2+map3idx*1,
arg6_l,
arg7_l);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg6_l[0] += ind_arg3[0+map2idx*4];
arg6_l[1] += ind_arg3[1+map2idx*4];
arg6_l[2] += ind_arg3[2+map2idx*4];
arg6_l[3] += ind_arg3[3+map2idx*4];
arg7_l[0] += ind_arg3[0+map3idx*4];
arg7_l[1] += ind_arg3[1+map3idx*4];
arg7_l[2] += ind_arg3[2+map3idx*4];
arg7_l[3] += ind_arg3[3+map3idx*4];
ind_arg3[0+map2idx*4] = arg6_l[0];
ind_arg3[1+map2idx*4] = arg6_l[1];
ind_arg3[2+map2idx*4] = arg6_l[2];
ind_arg3[3+map2idx*4] = arg6_l[3];
ind_arg3[0+map3idx*4] = arg7_l[0];
ind_arg3[1+map3idx*4] = arg7_l[1];
ind_arg3[2+map3idx*4] = arg7_l[2];
ind_arg3[3+map3idx*4] = arg7_l[3];
}
__syncthreads();
}
}
}
//GPU host stub function
void op_par_loop_res_calc_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(2);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (OP_kernels[2].count==1) op_register_strides();
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
arg0.map_data_d,
arg2.map_data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
void op_par_loop_res_calc_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7);
//GPU host stub function
#if OP_HYBRID_GPU
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
if (OP_hybrid_gpu) {
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}else{
op_par_loop_res_calc_cpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
}
#else
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
#endif //OP_HYBRID_GPU
| 0cabd85d9ddc5876d0ed0b01372948c825475836.cu | //
// auto-generated by op2.py
//
//user function
__device__
inline void res_calc_gpu(const double *x1, const double *x2, const double *q1, const double *q2,
const double *adt1, const double *adt2, double *res1, double *res2) {
double dx,dy,mu, ri, p1,vol1, p2,vol2, f;
dx = x1[0] - x2[0];
dy = x1[1] - x2[1];
ri = 1.0f/q1[0];
p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2]));
vol1 = ri*(q1[1]*dy - q1[2]*dx);
ri = 1.0f/q2[0];
p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2]));
vol2 = ri*(q2[1]*dy - q2[2]*dx);
mu = 0.5f*((*adt1)+(*adt2))*eps;
f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]);
res1[0] += f;
res2[0] -= f;
f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]);
res1[1] += f;
res2[1] -= f;
f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]);
res1[2] += f;
res2[2] -= f;
f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]);
res1[3] += f;
res2[3] -= f;
}
// CUDA kernel function
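// Each thread handles one edge of its block (located via blkmap/offset/nelems),
// gathers x, q and adt through the two indirection maps, and applies the res
// increments colour by colour (thrcol) so that edges sharing a cell never
// update it at the same time.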
__global__ void op_cuda_res_calc(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[4];
double arg7_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
int map3idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg6_l[d] = ZERO_double;
}
for ( int d=0; d<4; d++ ){
arg7_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
map3idx = opDat2Map[n + offset_b + set_size * 1];
//user-supplied kernel call
res_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg1+map3idx*4,
ind_arg2+map2idx*1,
ind_arg2+map3idx*1,
arg6_l,
arg7_l);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg6_l[0] += ind_arg3[0+map2idx*4];
arg6_l[1] += ind_arg3[1+map2idx*4];
arg6_l[2] += ind_arg3[2+map2idx*4];
arg6_l[3] += ind_arg3[3+map2idx*4];
arg7_l[0] += ind_arg3[0+map3idx*4];
arg7_l[1] += ind_arg3[1+map3idx*4];
arg7_l[2] += ind_arg3[2+map3idx*4];
arg7_l[3] += ind_arg3[3+map3idx*4];
ind_arg3[0+map2idx*4] = arg6_l[0];
ind_arg3[1+map2idx*4] = arg6_l[1];
ind_arg3[2+map2idx*4] = arg6_l[2];
ind_arg3[3+map2idx*4] = arg6_l[3];
ind_arg3[0+map3idx*4] = arg7_l[0];
ind_arg3[1+map3idx*4] = arg7_l[1];
ind_arg3[2+map3idx*4] = arg7_l[2];
ind_arg3[3+map3idx*4] = arg7_l[3];
}
__syncthreads();
}
}
}
//GPU host stub function
void op_par_loop_res_calc_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(2);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (OP_kernels[2].count==1) op_register_strides();
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_res_calc<<<nblocks,nthread>>>(
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
arg0.map_data_d,
arg2.map_data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
void op_par_loop_res_calc_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7);
//GPU host stub function
#if OP_HYBRID_GPU
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
if (OP_hybrid_gpu) {
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}else{
op_par_loop_res_calc_cpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
}
#else
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
#endif //OP_HYBRID_GPU
|
30365b7f18fa7a6b3c2715c96b90cae604754003.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define DATATYPE int
#define SMEMSIZE 512
#define REP 128
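// Measures average global-memory load latency: each thread chases a chain of
// dependent loads (p = in1[q]; q = in2[p]), so no load can issue before the
// previous one returns, and times REP chained accesses with clock().
// Writing p+q to `out` keeps the compiler from eliminating the loads.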
__global__ void global_order_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its)
{
DATATYPE p,q=threadIdx.x;
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=in1[q];
q=in2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx)
{
int its=30;
DATATYPE *d_in1,*d_in2;
hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
hipMalloc((void**)&d_time,sizeof(double)*blocks*threads);
hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
	hipLaunchKernelGGL(( global_order_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_in1,d_in2,d_out,its);
hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt);
hipFree(d_time);
hipFree(d_out);
hipFree(d_in1);
hipFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
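// init_disordered_32: random permutation inside each 32-element (warp-sized)
// window, so the chased chain stays within a warp's footprint but out of order.
// init_disordered_512 below spreads the chain across the whole array instead.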
void init_disordered_32(DATATYPE *a,int n)
{
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<n;i+=32)
{
for (int j=0;j<32;j++)
{
int jj=rand()%(32-j);
a[i+j]=p[jj];
for (int k=jj;k<(32-j);k++)
{
p[k]=p[k+1];
}
}
for (int j=0;j<32;j++)
{
p[j]=a[i+j];
a[i+j]+=i;
}
}
}
void init_disordered_512(DATATYPE *a,int n)
{
const int nn=n/32;
DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn);
DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n);
init_order(q,nn);
for (int i=0;i<n;i+=nn)
{
for (int j=0;j<nn;j++)
{
int jj=rand()%(nn-j);
b[i+j]=q[jj];
for (int k=jj;k<(nn-j);k++)
{
q[k]=q[k+1];
}
}
for (int j=0;j<nn;j++)
{
q[j]=b[i+j];
}
}
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<32;i++)
{
for (int j=0;j<nn;j++)
{
a[j*32+i]=b[i*nn+j]*32+p[i];
}
}
free(q);
free(b);
}
int main()
{
DATATYPE *h_in1, *h_in2, *h_in3;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
init_disordered_32(h_in2, SMEMSIZE);
init_disordered_512(h_in3, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
for (int i = 0; i <= 1024; i += 32) {
int blocks = (i == 0 ? 1 : i);
int threads = 256;
main_test(blocks, threads, h_in1, h_in1, 1);
main_test(blocks, threads, h_in2, h_in2, 2);
main_test(blocks, threads, h_in3, h_in3, 3);
}
free(h_in1);
free(h_in2);
free(h_in3);
return 0;
} | 30365b7f18fa7a6b3c2715c96b90cae604754003.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define DATATYPE int
#define SMEMSIZE 512
#define REP 128
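// Latency benchmark: REP dependent loads per timed section (each load's result
// feeds the next index), timed with clock(); storing p+q to `out` keeps the
// loads live under optimization.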
__global__ void global_order_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its)
{
DATATYPE p,q=threadIdx.x;
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=in1[q];
q=in2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx)
{
int its=30;
DATATYPE *d_in1,*d_in2;
cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
	global_order_1<<<blocks,threads>>>(d_time,d_in1,d_in2,d_out,its);
cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt);
cudaFree(d_time);
cudaFree(d_out);
cudaFree(d_in1);
cudaFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
void init_disordered_32(DATATYPE *a,int n)
{
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<n;i+=32)
{
for (int j=0;j<32;j++)
{
int jj=rand()%(32-j);
a[i+j]=p[jj];
for (int k=jj;k<(32-j);k++)
{
p[k]=p[k+1];
}
}
for (int j=0;j<32;j++)
{
p[j]=a[i+j];
a[i+j]+=i;
}
}
}
void init_disordered_512(DATATYPE *a,int n)
{
const int nn=n/32;
DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn);
DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n);
init_order(q,nn);
for (int i=0;i<n;i+=nn)
{
for (int j=0;j<nn;j++)
{
int jj=rand()%(nn-j);
b[i+j]=q[jj];
for (int k=jj;k<(nn-j);k++)
{
q[k]=q[k+1];
}
}
for (int j=0;j<nn;j++)
{
q[j]=b[i+j];
}
}
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<32;i++)
{
for (int j=0;j<nn;j++)
{
a[j*32+i]=b[i*nn+j]*32+p[i];
}
}
free(q);
free(b);
}
int main()
{
DATATYPE *h_in1, *h_in2, *h_in3;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
init_disordered_32(h_in2, SMEMSIZE);
init_disordered_512(h_in3, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
for (int i = 0; i <= 1024; i += 32) {
int blocks = (i == 0 ? 1 : i);
int threads = 256;
main_test(blocks, threads, h_in1, h_in1, 1);
main_test(blocks, threads, h_in2, h_in2, 2);
main_test(blocks, threads, h_in3, h_in3, 3);
}
free(h_in1);
free(h_in2);
free(h_in3);
return 0;
} |
72ac3babbfbd419e10ec0fb0bef2a79731e576f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace twistedclover {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_TWISTED_CLOVER_DIRAC
#include <tmc_dslash_def.h> // Twisted Clover kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted_clover
// declare the dslash events
#include <dslash_events.cuh>
using namespace twistedclover;
#ifdef GPU_TWISTED_CLOVER_DIRAC
template <typename sFloat, typename gFloat, typename cFloat>
class TwistedCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistCloverDslashType dslashType;
double a, b, c, d;
const cFloat *clover;
const float *cNorm;
const cFloat *cloverInv;
const float *cNrm2;
protected:
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
TwistedCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover, const float *cNorm,
const cFloat *cloverInv, const float *cNrm2, int cl_stride, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa,
const double mu, const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct,dagger),gauge0(gauge0), gauge1(gauge1), clover(clover),
cNorm(cNorm), cloverInv(cloverInv), cNrm2(cNrm2), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
dslashParam.gauge0 = (void*)gauge0;
dslashParam.gauge1 = (void*)gauge1;
dslashParam.a = kappa;
dslashParam.a_f = kappa;
dslashParam.b = mu;
dslashParam.b_f = mu;
dslashParam.cl_stride = cl_stride;
dslashParam.fl_stride = in->VolumeCB();
dslashParam.clover = (void*)clover;
dslashParam.cloverNorm = (float*)cNorm;
dslashParam.cloverInv = (void*)cloverInv;
dslashParam.cloverInvNorm = (float*)cNrm2;
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
#ifdef MULTI_GPU
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=interior,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,CloverTwistInvDslash");
#else
fillAux(INTERIOR_KERNEL, "type=interior,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,CloverTwistInvDynDslash");
#endif // DYNAMIC_CLOVER
#else
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=single-GPU,CloverTwistInvDslash");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU,CloverTwistInvDynDslash");
#endif // DYNAMIC_CLOVER
#endif // MULTI_GPU
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
#ifdef MULTI_GPU
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=interior,Dslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,Dslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,Dslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,Dslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,Dslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,Dslash");
#else
fillAux(INTERIOR_KERNEL, "type=interior,DynDslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,DynDslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,DynDslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,DynDslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,DynDslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,DynDslash");
#endif // DYNAMIC_CLOVER
#else
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=single-GPU,Dslash");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU,DynDslash");
#endif // DYNAMIC_CLOVER
#endif // MULTI_GPU
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
#ifdef MULTI_GPU
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=interior,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,DslashCloverTwist");
#else
fillAux(INTERIOR_KERNEL, "type=interior,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,DynDslashCloverTwist");
#endif // DYNAMIC_CLOVER
#else
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=single-GPU,DslashCloverTwist");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU,DynDslashCloverTwist");
#endif // DYNAMIC_CLOVER
#endif // MULTI_GPU
break;
default:
errorQuda("Unsupported twisted-dslash type %d", dslashType);
}
}
virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const hipStream_t &stream)
{
// factor of 2 (or 1) for T-dimensional spin projection (FIXME - unnecessary)
dslashParam.tProjScale = getKernelPackT() ? 1.0 : 2.0;
dslashParam.tProjScale_f = (float)(dslashParam.tProjScale);
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w;
for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i];
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
default:
errorQuda("Invalid twisted clover dslash type");
}
}
long long flops() const {
int clover_flops = 504 + 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// clover flops are done in the interior kernel
flops += clover_flops * in->VolumeCB();
break;
}
return flops;
}
long long bytes() const {
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0);
long long bytes = DslashCuda::bytes();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes*in->VolumeCB();
break;
}
return bytes;
}
};
#endif // GPU_TWISTED_CLOVER_DIRAC
#include <dslash_policy.cuh>
void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->createComms(1);
#ifdef GPU_TWISTED_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
twist_a = 0.;
twist_b = 0.;
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
void *cloverP=0, *cloverNormP=0, *cloverInvP=0, *cloverInvNormP=0;
QudaPrecision clover_prec = bindTwistedCloverTex(*clover, *cloverInv, parity, &cloverP, &cloverNormP, &cloverInvP, &cloverInvNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
#ifndef DYNAMIC_CLOVER
if (clover->stride != cloverInv->stride)
errorQuda("clover and cloverInv must have matching strides (%d != %d)", clover->stride, cloverInv->stride);
#endif
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<double2,double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP,
(double2*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<float4,float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP,
(float4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedCloverDslashCuda<short4,short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP,
(short4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
}
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslash_policy.apply(0);
delete dslash;
unbindGaugeTex(gauge);
unbindTwistedCloverTex(*clover);
checkCudaError();
#else
errorQuda("Twisted clover dslash has not been built");
#endif
}
}
| 72ac3babbfbd419e10ec0fb0bef2a79731e576f2.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace twistedclover {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_TWISTED_CLOVER_DIRAC
#include <tmc_dslash_def.h> // Twisted Clover kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted_clover
// declare the dslash events
#include <dslash_events.cuh>
using namespace twistedclover;
#ifdef GPU_TWISTED_CLOVER_DIRAC
template <typename sFloat, typename gFloat, typename cFloat>
class TwistedCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistCloverDslashType dslashType;
double a, b, c, d;
const cFloat *clover;
const float *cNorm;
const cFloat *cloverInv;
const float *cNrm2;
protected:
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
TwistedCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover, const float *cNorm,
const cFloat *cloverInv, const float *cNrm2, int cl_stride, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa,
const double mu, const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct,dagger),gauge0(gauge0), gauge1(gauge1), clover(clover),
cNorm(cNorm), cloverInv(cloverInv), cNrm2(cNrm2), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
dslashParam.gauge0 = (void*)gauge0;
dslashParam.gauge1 = (void*)gauge1;
dslashParam.a = kappa;
dslashParam.a_f = kappa;
dslashParam.b = mu;
dslashParam.b_f = mu;
dslashParam.cl_stride = cl_stride;
dslashParam.fl_stride = in->VolumeCB();
dslashParam.clover = (void*)clover;
dslashParam.cloverNorm = (float*)cNorm;
dslashParam.cloverInv = (void*)cloverInv;
dslashParam.cloverInvNorm = (float*)cNrm2;
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
#ifdef MULTI_GPU
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=interior,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,CloverTwistInvDslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,CloverTwistInvDslash");
#else
fillAux(INTERIOR_KERNEL, "type=interior,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,CloverTwistInvDynDslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,CloverTwistInvDynDslash");
#endif // DYNAMIC_CLOVER
#else
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=single-GPU,CloverTwistInvDslash");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU,CloverTwistInvDynDslash");
#endif // DYNAMIC_CLOVER
#endif // MULTI_GPU
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
#ifdef MULTI_GPU
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=interior,Dslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,Dslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,Dslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,Dslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,Dslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,Dslash");
#else
fillAux(INTERIOR_KERNEL, "type=interior,DynDslash");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,DynDslash");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,DynDslash");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,DynDslash");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,DynDslash");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,DynDslash");
#endif // DYNAMIC_CLOVER
#else
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=single-GPU,Dslash");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU,DynDslash");
#endif // DYNAMIC_CLOVER
#endif // MULTI_GPU
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
#ifdef MULTI_GPU
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=interior,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,DslashCloverTwist");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,DslashCloverTwist");
#else
fillAux(INTERIOR_KERNEL, "type=interior,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_ALL, "type=exterior_all,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_X, "type=exterior_x,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Y, "type=exterior_y,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_Z, "type=exterior_z,DynDslashCloverTwist");
fillAux(EXTERIOR_KERNEL_T, "type=exterior_t,DynDslashCloverTwist");
#endif // DYNAMIC_CLOVER
#else
#ifndef DYNAMIC_CLOVER
fillAux(INTERIOR_KERNEL, "type=single-GPU,DslashCloverTwist");
#else
fillAux(INTERIOR_KERNEL, "type=single-GPU,DynDslashCloverTwist");
#endif // DYNAMIC_CLOVER
#endif // MULTI_GPU
break;
default:
errorQuda("Unsupported twisted-dslash type %d", dslashType);
}
}
virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const cudaStream_t &stream)
{
// factor of 2 (or 1) for T-dimensional spin projection (FIXME - unnecessary)
dslashParam.tProjScale = getKernelPackT() ? 1.0 : 2.0;
dslashParam.tProjScale_f = (float)(dslashParam.tProjScale);
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w;
for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i];
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
default:
errorQuda("Invalid twisted clover dslash type");
}
}
long long flops() const {
int clover_flops = 504 + 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// clover flops are done in the interior kernel
flops += clover_flops * in->VolumeCB();
break;
}
return flops;
}
long long bytes() const {
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0);
long long bytes = DslashCuda::bytes();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes*in->VolumeCB();
break;
}
return bytes;
}
};
#endif // GPU_TWISTED_CLOVER_DIRAC
#include <dslash_policy.cuh>
void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->createComms(1);
#ifdef GPU_TWISTED_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
twist_a = 0.;
twist_b = 0.;
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
void *cloverP=0, *cloverNormP=0, *cloverInvP=0, *cloverInvNormP=0;
QudaPrecision clover_prec = bindTwistedCloverTex(*clover, *cloverInv, parity, &cloverP, &cloverNormP, &cloverInvP, &cloverInvNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
#ifndef DYNAMIC_CLOVER
if (clover->stride != cloverInv->stride)
errorQuda("clover and cloverInv must have matching strides (%d != %d)", clover->stride, cloverInv->stride);
#endif
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<double2,double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP,
(double2*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<float4,float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP,
(float4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedCloverDslashCuda<short4,short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP,
(short4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
}
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslash_policy.apply(0);
delete dslash;
unbindGaugeTex(gauge);
unbindTwistedCloverTex(*clover);
checkCudaError();
#else
errorQuda("Twisted clover dslash has not been built");
#endif
}
}
|
a9119d8c25df34cb0fd063b48c37918d46e3ba32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
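// Element-wise vector add: one thread per element, indexed by
// threadIdx.x + blockDim.x * blockIdx.x.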
__global__
void cuda_add(const int *ar, const int *br, int *cr) {
const unsigned idx = threadIdx.x + blockDim.x * blockIdx.x;
const int a = ar[idx];
const int b = br[idx];
cr[idx] = a + b;
}
__host__
int main(void) {
const unsigned threads = 1<<16;
const unsigned size = threads*sizeof(int);
// Initialize host arrays
int *a_host = new int[threads];
int *b_host = new int[threads];
int *c_host = new int[threads];
for (unsigned i=0; i<threads; i++) {
a_host[i] = (std::rand()%10);
b_host[i] = (std::rand()%10);
}
// Initialize device arrays
int *a_dev = NULL;
int *b_dev = NULL;
int *c_dev = NULL;
hipMalloc((void**)&a_dev,size);
hipMalloc((void**)&b_dev,size);
hipMalloc((void**)&c_dev,size);
// Transfer memory
hipMemcpy((void*)a_dev,(void*)a_host,size,hipMemcpyHostToDevice);
hipMemcpy((void*)b_dev,(void*)b_host,size,hipMemcpyHostToDevice);
// Setup and launch kernel
const unsigned threads_per_block = 512;
const unsigned blocks_per_grid = threads / threads_per_block;
	hipLaunchKernelGGL(( cuda_add), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0, a_dev,b_dev,c_dev);
// Copy back result and print it
hipMemcpy((void*)c_host,(void*)c_dev,size,hipMemcpyDeviceToHost);
for (size_t i=0; i<threads; i++) std::cout << c_host[i] << " ";
std::cout << std::endl;
// Clean up
	delete[] a_host;
	delete[] b_host;
	delete[] c_host;
hipFree(a_dev);
hipFree(b_dev);
hipFree(c_dev);
return 0;
} | a9119d8c25df34cb0fd063b48c37918d46e3ba32.cu | #include <iostream>
#include <cstdlib>
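// One thread computes one element of c = a + b.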
__global__
void cuda_add(const int *ar, const int *br, int *cr) {
const unsigned idx = threadIdx.x + blockDim.x * blockIdx.x;
const int a = ar[idx];
const int b = br[idx];
cr[idx] = a + b;
}
__host__
int main(void) {
const unsigned threads = 1<<16;
const unsigned size = threads*sizeof(int);
// Initialize host arrays
int *a_host = new int[threads];
int *b_host = new int[threads];
int *c_host = new int[threads];
for (unsigned i=0; i<threads; i++) {
a_host[i] = (std::rand()%10);
b_host[i] = (std::rand()%10);
}
// Initialize device arrays
int *a_dev = NULL;
int *b_dev = NULL;
int *c_dev = NULL;
cudaMalloc((void**)&a_dev,size);
cudaMalloc((void**)&b_dev,size);
cudaMalloc((void**)&c_dev,size);
// Transfer memory
cudaMemcpy((void*)a_dev,(void*)a_host,size,cudaMemcpyHostToDevice);
cudaMemcpy((void*)b_dev,(void*)b_host,size,cudaMemcpyHostToDevice);
// Setup and launch kernel
const unsigned threads_per_block = 512;
const unsigned blocks_per_grid = threads / threads_per_block;
	cuda_add<<<blocks_per_grid,threads_per_block>>>(a_dev,b_dev,c_dev);
// Copy back result and print it
cudaMemcpy((void*)c_host,(void*)c_dev,size,cudaMemcpyDeviceToHost);
for (size_t i=0; i<threads; i++) std::cout << c_host[i] << " ";
std::cout << std::endl;
// Clean up
	delete[] a_host;
	delete[] b_host;
	delete[] c_host;
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(c_dev);
return 0;
} |
9ecf8d33605e23bbef3ac4dc0885060b93baa45f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdint> // uint32_t
using namespace std;
#define SIGN(x) (x > 1) ? 1 : ((x < -1) ? -1 : x)
#define NUM_STATES (1<<20)
#define NUM_COPYS (1 << 10)
#define NUM_NOS 96
#define N 3
typedef unsigned long uint64;
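// Each of the NUM_NOS = 96 nodes holds a ternary value in {-1, 0, +1}, packed
// into a 2-bit field (01 = +1, 11 = -1, 00/10 = 0). 32 nodes fit in one 64-bit
// word, so a full state is N = 3 uint64 words.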
__device__
bool comp(uint64 *S0, uint64 *S1){
for (int i = 0; i < N; ++i) {
if(S1[i] != S0[i])
return false;
}
return true;
}
__device__
short getDecValue(short v){
short vp = 0;
switch(v){
case 0:
case 2: vp = 0;
break;
case 1: vp = 1;
break;
case 3: vp = -1;
break;
}
return vp;
}
__device__
short get2bit(short idx,uint64 v){
idx = idx*2;
return (v >> idx) & 3;
}
__device__
void set2bit(short idx, short newV, uint64 *v){
uint64 mask = 3;
idx = idx*2;
newV = (newV == -1)? 3 : newV;
*v &= ~(mask << idx);
*v |= ((uint64)newV << idx);
}
__device__
short getBlockIdx(short idx){
idx = idx*2;
if(idx<64)
return 0;
else if(idx>127)
return 2;
else
return 1;
}
__device__
void pass(uint64 *S) {
uint64 Sc[N];
for (int i = 0; i < N; ++i) {
Sc[i] = S[i];
}
set2bit(0,1,&S[getBlockIdx(0)]);
set2bit(1,1,&S[getBlockIdx(1)]);
set2bit(2,-1,&S[getBlockIdx(2)]);
set2bit(3,0,&S[getBlockIdx(3)]);
set2bit(4,-1,&S[getBlockIdx(4)]);
set2bit(5,0,&S[getBlockIdx(5)]);
set2bit(6,1,&S[getBlockIdx(6)]);
set2bit(7,1,&S[getBlockIdx(7)]);
set2bit(8,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(8)]);
set2bit(9,SIGN( + getDecValue(get2bit(0,Sc[getBlockIdx(0)])) + getDecValue(get2bit(71,Sc[getBlockIdx(71)]))),&S[getBlockIdx(9)]);
set2bit(10,SIGN( + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + getDecValue(get2bit(60,Sc[getBlockIdx(60)])) + (-1)),&S[getBlockIdx(10)]);
set2bit(11,SIGN( - (getDecValue(get2bit(2,Sc[getBlockIdx(2)]))) + 1),&S[getBlockIdx(11)]);
set2bit(12,SIGN( - (getDecValue(get2bit(13,Sc[getBlockIdx(13)]))) + 1),&S[getBlockIdx(12)]);
set2bit(13,SIGN( + getDecValue(get2bit(14,Sc[getBlockIdx(14)])) + getDecValue(get2bit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(13)]);
set2bit(14,SIGN( + getDecValue(get2bit(1,Sc[getBlockIdx(1)]))),&S[getBlockIdx(14)]);
set2bit(15,SIGN( + getDecValue(get2bit(2,Sc[getBlockIdx(2)])) - (getDecValue(get2bit(4,Sc[getBlockIdx(4)])))),&S[getBlockIdx(15)]);
set2bit(16,SIGN( - (getDecValue(get2bit(12,Sc[getBlockIdx(12)]))) + getDecValue(get2bit(14,Sc[getBlockIdx(14)])) + 1),&S[getBlockIdx(16)]);
set2bit(17,SIGN( + getDecValue(get2bit(16,Sc[getBlockIdx(16)])) + getDecValue(get2bit(78,Sc[getBlockIdx(78)]))),&S[getBlockIdx(17)]);
set2bit(18,SIGN( + getDecValue(get2bit(17,Sc[getBlockIdx(17)])) - (getDecValue(get2bit(7,Sc[getBlockIdx(7)]))) - (getDecValue(get2bit(92,Sc[getBlockIdx(92)]))) + 1),&S[getBlockIdx(18)]);
set2bit(19,SIGN( + getDecValue(get2bit(18,Sc[getBlockIdx(18)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(19)]);
set2bit(20,SIGN( + getDecValue(get2bit(13,Sc[getBlockIdx(13)])) + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(33,Sc[getBlockIdx(33)])) - (getDecValue(get2bit(37,Sc[getBlockIdx(37)]))) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + getDecValue(get2bit(82,Sc[getBlockIdx(82)]))),&S[getBlockIdx(20)]);
set2bit(21,SIGN( + getDecValue(get2bit(18,Sc[getBlockIdx(18)])) + getDecValue(get2bit(20,Sc[getBlockIdx(20)])) + getDecValue(get2bit(20,Sc[getBlockIdx(20)])) - (getDecValue(get2bit(76,Sc[getBlockIdx(76)]))) + getDecValue(get2bit(95,Sc[getBlockIdx(95)])) + (-1)),&S[getBlockIdx(21)]);
set2bit(22,SIGN( + getDecValue(get2bit(13,Sc[getBlockIdx(13)])) + getDecValue(get2bit(16,Sc[getBlockIdx(16)]))),&S[getBlockIdx(22)]);
set2bit(23,SIGN( + getDecValue(get2bit(22,Sc[getBlockIdx(22)]))),&S[getBlockIdx(23)]);
set2bit(24,SIGN( + getDecValue(get2bit(19,Sc[getBlockIdx(19)])) + getDecValue(get2bit(23,Sc[getBlockIdx(23)]))),&S[getBlockIdx(24)]);
set2bit(25,SIGN( + getDecValue(get2bit(18,Sc[getBlockIdx(18)])) + getDecValue(get2bit(19,Sc[getBlockIdx(19)])) + (-1)),&S[getBlockIdx(25)]);
set2bit(26,SIGN( - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + getDecValue(get2bit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(26)]);
set2bit(27,SIGN( + getDecValue(get2bit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(27)]);
set2bit(28,SIGN( + getDecValue(get2bit(7,Sc[getBlockIdx(7)])) + 1),&S[getBlockIdx(28)]);
set2bit(29,SIGN( - (getDecValue(get2bit(24,Sc[getBlockIdx(24)]))) - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) - (getDecValue(get2bit(27,Sc[getBlockIdx(27)]))) - (getDecValue(get2bit(33,Sc[getBlockIdx(33)]))) + 3),&S[getBlockIdx(29)]);
set2bit(30,SIGN( + getDecValue(get2bit(28,Sc[getBlockIdx(28)])) + getDecValue(get2bit(29,Sc[getBlockIdx(29)])) + (-1)),&S[getBlockIdx(30)]);
set2bit(31,SIGN( - (getDecValue(get2bit(30,Sc[getBlockIdx(30)]))) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + 1),&S[getBlockIdx(31)]);
set2bit(32,SIGN( - (getDecValue(get2bit(10,Sc[getBlockIdx(10)]))) + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(84,Sc[getBlockIdx(84)]))),&S[getBlockIdx(32)]);
set2bit(33,SIGN( + getDecValue(get2bit(15,Sc[getBlockIdx(15)])) + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(42,Sc[getBlockIdx(42)])) - (getDecValue(get2bit(72,Sc[getBlockIdx(72)]))) + (-1)),&S[getBlockIdx(33)]);
set2bit(34,SIGN( + getDecValue(get2bit(4,Sc[getBlockIdx(4)])) + getDecValue(get2bit(33,Sc[getBlockIdx(33)])) - (getDecValue(get2bit(36,Sc[getBlockIdx(36)]))) - (getDecValue(get2bit(36,Sc[getBlockIdx(36)]))) - (getDecValue(get2bit(37,Sc[getBlockIdx(37)]))) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(69,Sc[getBlockIdx(69)]))) + 2),&S[getBlockIdx(34)]);
set2bit(35,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(35)]);
set2bit(36,SIGN( - (getDecValue(get2bit(4,Sc[getBlockIdx(4)]))) - (getDecValue(get2bit(71,Sc[getBlockIdx(71)]))) + 1),&S[getBlockIdx(36)]);
set2bit(37,SIGN( - (getDecValue(get2bit(4,Sc[getBlockIdx(4)]))) + getDecValue(get2bit(71,Sc[getBlockIdx(71)])) + 1),&S[getBlockIdx(37)]);
set2bit(38,SIGN( - (getDecValue(get2bit(8,Sc[getBlockIdx(8)]))) + getDecValue(get2bit(39,Sc[getBlockIdx(39)])) + getDecValue(get2bit(6,Sc[getBlockIdx(6)])) - (getDecValue(get2bit(40,Sc[getBlockIdx(40)]))) - (getDecValue(get2bit(63,Sc[getBlockIdx(63)]))) + (-1)),&S[getBlockIdx(38)]);
set2bit(39,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(23,Sc[getBlockIdx(23)])) - (getDecValue(get2bit(34,Sc[getBlockIdx(34)]))) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + getDecValue(get2bit(70,Sc[getBlockIdx(70)])) + getDecValue(get2bit(84,Sc[getBlockIdx(84)])) + getDecValue(get2bit(5,Sc[getBlockIdx(5)])) + (-1)),&S[getBlockIdx(39)]);
set2bit(40,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(40)]);
set2bit(41,SIGN( - (getDecValue(get2bit(22,Sc[getBlockIdx(22)]))) - (getDecValue(get2bit(23,Sc[getBlockIdx(23)]))) - (getDecValue(get2bit(24,Sc[getBlockIdx(24)]))) - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + getDecValue(get2bit(72,Sc[getBlockIdx(72)])) + 1),&S[getBlockIdx(41)]);
set2bit(42,SIGN( - (getDecValue(get2bit(41,Sc[getBlockIdx(41)]))) + 1),&S[getBlockIdx(42)]);
set2bit(43,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) - (getDecValue(get2bit(44,Sc[getBlockIdx(44)]))) - (getDecValue(get2bit(60,Sc[getBlockIdx(60)]))) + getDecValue(get2bit(88,Sc[getBlockIdx(88)])) + 1),&S[getBlockIdx(43)]);
set2bit(44,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(45,Sc[getBlockIdx(45)]))) - (getDecValue(get2bit(46,Sc[getBlockIdx(46)])))),&S[getBlockIdx(44)]);
set2bit(45,SIGN( - (getDecValue(get2bit(34,Sc[getBlockIdx(34)]))) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) - (getDecValue(get2bit(44,Sc[getBlockIdx(44)]))) + getDecValue(get2bit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(45)]);
set2bit(46,SIGN( - (getDecValue(get2bit(22,Sc[getBlockIdx(22)]))) - (getDecValue(get2bit(24,Sc[getBlockIdx(24)]))) - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) - (getDecValue(get2bit(34,Sc[getBlockIdx(34)]))) + 1),&S[getBlockIdx(46)]);
set2bit(47,SIGN( - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(46,Sc[getBlockIdx(46)]))) + 1),&S[getBlockIdx(47)]);
set2bit(48,SIGN( - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) - (getDecValue(get2bit(53,Sc[getBlockIdx(53)]))) - (getDecValue(get2bit(54,Sc[getBlockIdx(54)]))) - (getDecValue(get2bit(60,Sc[getBlockIdx(60)]))) + 2),&S[getBlockIdx(48)]);
set2bit(49,SIGN( - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + 1),&S[getBlockIdx(49)]);
set2bit(50,SIGN( + getDecValue(get2bit(16,Sc[getBlockIdx(16)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + (-3)),&S[getBlockIdx(50)]);
set2bit(51,SIGN( + getDecValue(get2bit(51,Sc[getBlockIdx(51)])) - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) - (getDecValue(get2bit(56,Sc[getBlockIdx(56)]))) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)]))) + getDecValue(get2bit(80,Sc[getBlockIdx(80)])) + getDecValue(get2bit(81,Sc[getBlockIdx(81)]))),&S[getBlockIdx(51)]);
set2bit(52,SIGN( - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(55,Sc[getBlockIdx(55)]))) - (getDecValue(get2bit(56,Sc[getBlockIdx(56)]))) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)]))) + 1),&S[getBlockIdx(52)]);
set2bit(53,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) - (getDecValue(get2bit(29,Sc[getBlockIdx(29)]))) - (getDecValue(get2bit(29,Sc[getBlockIdx(29)]))) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)]))) - (getDecValue(get2bit(64,Sc[getBlockIdx(64)]))) - (getDecValue(get2bit(69,Sc[getBlockIdx(69)]))) + getDecValue(get2bit(70,Sc[getBlockIdx(70)])) + getDecValue(get2bit(84,Sc[getBlockIdx(84)])) + getDecValue(get2bit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(53)]);
set2bit(54,SIGN( - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)])))),&S[getBlockIdx(54)]);
set2bit(55,SIGN( - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) + getDecValue(get2bit(56,Sc[getBlockIdx(56)])) + 1),&S[getBlockIdx(55)]);
set2bit(56,SIGN( + getDecValue(get2bit(52,Sc[getBlockIdx(52)])) - (getDecValue(get2bit(55,Sc[getBlockIdx(55)])))),&S[getBlockIdx(56)]);
set2bit(57,SIGN( + getDecValue(get2bit(51,Sc[getBlockIdx(51)])) + getDecValue(get2bit(52,Sc[getBlockIdx(52)])) - (getDecValue(get2bit(55,Sc[getBlockIdx(55)]))) + getDecValue(get2bit(56,Sc[getBlockIdx(56)])) + getDecValue(get2bit(57,Sc[getBlockIdx(57)]))),&S[getBlockIdx(57)]);
set2bit(58,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) - (getDecValue(get2bit(38,Sc[getBlockIdx(38)]))) - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) - (getDecValue(get2bit(53,Sc[getBlockIdx(53)]))) + getDecValue(get2bit(62,Sc[getBlockIdx(62)])) + 1),&S[getBlockIdx(58)]);
set2bit(59,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) - (getDecValue(get2bit(38,Sc[getBlockIdx(38)]))) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + getDecValue(get2bit(62,Sc[getBlockIdx(62)])) - (getDecValue(get2bit(78,Sc[getBlockIdx(78)]))) + 1),&S[getBlockIdx(59)]);
set2bit(60,SIGN( + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) - (getDecValue(get2bit(50,Sc[getBlockIdx(50)]))) - (getDecValue(get2bit(87,Sc[getBlockIdx(87)]))) + 1),&S[getBlockIdx(60)]);
set2bit(61,SIGN( + getDecValue(get2bit(3,Sc[getBlockIdx(3)])) + getDecValue(get2bit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(61)]);
set2bit(62,SIGN( + getDecValue(get2bit(61,Sc[getBlockIdx(61)])) + getDecValue(get2bit(85,Sc[getBlockIdx(85)])) + (-1)),&S[getBlockIdx(62)]);
set2bit(63,SIGN( + getDecValue(get2bit(61,Sc[getBlockIdx(61)]))),&S[getBlockIdx(63)]);
set2bit(64,SIGN( + getDecValue(get2bit(62,Sc[getBlockIdx(62)])) + getDecValue(get2bit(85,Sc[getBlockIdx(85)]))),&S[getBlockIdx(64)]);
set2bit(65,SIGN( + getDecValue(get2bit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(65)]);
set2bit(66,SIGN( + getDecValue(get2bit(65,Sc[getBlockIdx(65)]))),&S[getBlockIdx(66)]);
set2bit(67,SIGN( + getDecValue(get2bit(66,Sc[getBlockIdx(66)]))),&S[getBlockIdx(67)]);
set2bit(68,SIGN( + getDecValue(get2bit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(68)]);
set2bit(69,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + 2),&S[getBlockIdx(69)]);
set2bit(70,SIGN( + getDecValue(get2bit(23,Sc[getBlockIdx(23)])) + getDecValue(get2bit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(70)]);
set2bit(71,SIGN( - (getDecValue(get2bit(35,Sc[getBlockIdx(35)]))) - (getDecValue(get2bit(83,Sc[getBlockIdx(83)])))),&S[getBlockIdx(71)]);
set2bit(72,SIGN( - (getDecValue(get2bit(1,Sc[getBlockIdx(1)]))) + getDecValue(get2bit(11,Sc[getBlockIdx(11)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(87,Sc[getBlockIdx(87)])) + 1),&S[getBlockIdx(72)]);
set2bit(73,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) - (getDecValue(get2bit(44,Sc[getBlockIdx(44)]))) + getDecValue(get2bit(45,Sc[getBlockIdx(45)])) - (getDecValue(get2bit(47,Sc[getBlockIdx(47)]))) + getDecValue(get2bit(66,Sc[getBlockIdx(66)])) + getDecValue(get2bit(67,Sc[getBlockIdx(67)]))),&S[getBlockIdx(73)]);
set2bit(74,SIGN( + getDecValue(get2bit(73,Sc[getBlockIdx(73)]))),&S[getBlockIdx(74)]);
set2bit(75,SIGN( + getDecValue(get2bit(66,Sc[getBlockIdx(66)])) + getDecValue(get2bit(74,Sc[getBlockIdx(74)]))),&S[getBlockIdx(75)]);
set2bit(76,SIGN( - (getDecValue(get2bit(21,Sc[getBlockIdx(21)]))) - (getDecValue(get2bit(32,Sc[getBlockIdx(32)]))) - (getDecValue(get2bit(95,Sc[getBlockIdx(95)]))) + 3),&S[getBlockIdx(76)]);
set2bit(77,SIGN( + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + (-1)),&S[getBlockIdx(77)]);
set2bit(78,SIGN( + getDecValue(get2bit(12,Sc[getBlockIdx(12)])) + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(62,Sc[getBlockIdx(62)]))) - (getDecValue(get2bit(91,Sc[getBlockIdx(91)]))) + (-4)),&S[getBlockIdx(78)]);
set2bit(79,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(79)]);
set2bit(80,SIGN( + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + getDecValue(get2bit(54,Sc[getBlockIdx(54)])) + (-1)),&S[getBlockIdx(80)]);
set2bit(81,SIGN( + getDecValue(get2bit(55,Sc[getBlockIdx(55)])) + getDecValue(get2bit(57,Sc[getBlockIdx(57)])) + (-1)),&S[getBlockIdx(81)]);
set2bit(82,SIGN( + getDecValue(get2bit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(82)]);
set2bit(83,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + getDecValue(get2bit(59,Sc[getBlockIdx(59)]))),&S[getBlockIdx(83)]);
set2bit(84,SIGN( + getDecValue(get2bit(31,Sc[getBlockIdx(31)])) - (getDecValue(get2bit(82,Sc[getBlockIdx(82)])))),&S[getBlockIdx(84)]);
set2bit(85,SIGN( - (getDecValue(get2bit(38,Sc[getBlockIdx(38)]))) + 1),&S[getBlockIdx(85)]);
set2bit(86,SIGN( + getDecValue(get2bit(19,Sc[getBlockIdx(19)])) + getDecValue(get2bit(33,Sc[getBlockIdx(33)]))),&S[getBlockIdx(86)]);
set2bit(87,SIGN( + getDecValue(get2bit(9,Sc[getBlockIdx(9)]))),&S[getBlockIdx(87)]);
set2bit(88,SIGN( + getDecValue(get2bit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(88)]);
set2bit(89,SIGN( + getDecValue(get2bit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(89)]);
set2bit(90,SIGN( + getDecValue(get2bit(24,Sc[getBlockIdx(24)])) + getDecValue(get2bit(86,Sc[getBlockIdx(86)]))),&S[getBlockIdx(90)]);
set2bit(91,SIGN( - (getDecValue(get2bit(90,Sc[getBlockIdx(90)]))) + 1),&S[getBlockIdx(91)]);
set2bit(92,SIGN( + getDecValue(get2bit(7,Sc[getBlockIdx(7)])) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + (-1)),&S[getBlockIdx(92)]);
set2bit(93,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + (-1)),&S[getBlockIdx(93)]);
set2bit(94,SIGN( + getDecValue(get2bit(93,Sc[getBlockIdx(93)]))),&S[getBlockIdx(94)]);
set2bit(95,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) - (getDecValue(get2bit(29,Sc[getBlockIdx(29)]))) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + getDecValue(get2bit(61,Sc[getBlockIdx(61)])) + (-1)),&S[getBlockIdx(95)]);
}
__global__
void findAttractor(uint64 *attractors, uint32_t *transients, uint32_t *periods, uint32_t numThreads){
int transient = 0, period = 0;
uint64 S0[N],S1[N];
uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t step = NUM_STATES / NUM_COPYS;
uint32_t rest = NUM_STATES % NUM_COPYS;
uint32_t begin = 0;
uint32_t end = step - 1;
bool flag = true;
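    // Partition the NUM_STATES initial states into contiguous chunks of about
    // NUM_STATES/NUM_COPYS, handing the remainder to the lowest-numbered threads.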
if(thread < numThreads){
if(rest > 0){
end = end + 1;
rest = rest - 1;
}else{
flag = false;
}
for(uint32_t i = 0; i < NUM_COPYS;i++){
if(i == thread) break;
if(rest > 0){
end = end + 1;
begin = begin + 1;
rest = rest - 1;
}
else if(rest == 0 && flag){
begin = begin + 1;
flag = 0;
}
begin += step;
end += step;
}
for (int i = begin; i < end; ++i) {
S0[0] = S1[0] = thread;
            //S0[1] = S1[1] = ;// initialize with rand
            //S0[2] = S1[2] = ;// initialize with rand
            set2bit(6,1,&S0[getBlockIdx(6)]); // mandatory **check that the correct bit is being set**
            set2bit(7,1,&S1[getBlockIdx(7)]); // mandatory **check that the correct bit is being set**
transient = 0;
period = 0;
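            // Floyd's cycle detection: S0 (hare) advances two steps per
            // iteration, S1 (tortoise) one; once they meet, the second loop
            // walks S0 around the attractor to measure its period.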
do{
pass(S0);
pass(S0);
pass(S1);
transient++;
}while(!comp(S0,S1));
do{
pass(S0);
period++;
}while (!comp(S0,S1));
period--;
transients[i] = transient;
            periods[i] = period;
for(int s = 0; s < N; s++){
attractors[i * N + s] = S0[s];
}
//cout << transient << " " << period << std::endl;
}
    }
}
int main() {
}
| 9ecf8d33605e23bbef3ac4dc0885060b93baa45f.cu | #include <iostream>
#include <cstdint> // uint32_t
using namespace std;
#define SIGN(x) (x > 1) ? 1 : ((x < -1) ? -1 : x)
#define NUM_STATES (1<<20)
#define NUM_COPYS (1 << 10)
#define NUM_NOS 96
#define N 3
typedef unsigned long uint64;
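// Ternary node values are packed two bits per node (01 = +1, 11 = -1, 00/10 = 0);
// N = 3 64-bit words hold the 96-node state.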
__device__
bool comp(uint64 *S0, uint64 *S1){
for (int i = 0; i < N; ++i) {
if(S1[i] != S0[i])
return false;
}
return true;
}
__device__
short getDecValue(short v){
short vp = 0;
switch(v){
case 0:
case 2: vp = 0;
break;
case 1: vp = 1;
break;
case 3: vp = -1;
break;
}
return vp;
}
__device__
short get2bit(short idx,uint64 v){
idx = idx*2;
return (v >> idx) & 3;
}
__device__
void set2bit(short idx, short newV, uint64 *v){
uint64 mask = 3;
idx = idx*2;
newV = (newV == -1)? 3 : newV;
*v &= ~(mask << idx);
*v |= ((uint64)newV << idx);
}
__device__
short getBlockIdx(short idx){
idx = idx*2;
if(idx<64)
return 0;
else if(idx>127)
return 2;
else
return 1;
}
__device__
void pass(uint64 *S) {
uint64 Sc[N];
for (int i = 0; i < N; ++i) {
Sc[i] = S[i];
}
set2bit(0,1,&S[getBlockIdx(0)]);
set2bit(1,1,&S[getBlockIdx(1)]);
set2bit(2,-1,&S[getBlockIdx(2)]);
set2bit(3,0,&S[getBlockIdx(3)]);
set2bit(4,-1,&S[getBlockIdx(4)]);
set2bit(5,0,&S[getBlockIdx(5)]);
set2bit(6,1,&S[getBlockIdx(6)]);
set2bit(7,1,&S[getBlockIdx(7)]);
set2bit(8,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(8)]);
set2bit(9,SIGN( + getDecValue(get2bit(0,Sc[getBlockIdx(0)])) + getDecValue(get2bit(71,Sc[getBlockIdx(71)]))),&S[getBlockIdx(9)]);
set2bit(10,SIGN( + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + getDecValue(get2bit(60,Sc[getBlockIdx(60)])) + (-1)),&S[getBlockIdx(10)]);
set2bit(11,SIGN( - (getDecValue(get2bit(2,Sc[getBlockIdx(2)]))) + 1),&S[getBlockIdx(11)]);
set2bit(12,SIGN( - (getDecValue(get2bit(13,Sc[getBlockIdx(13)]))) + 1),&S[getBlockIdx(12)]);
set2bit(13,SIGN( + getDecValue(get2bit(14,Sc[getBlockIdx(14)])) + getDecValue(get2bit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(13)]);
set2bit(14,SIGN( + getDecValue(get2bit(1,Sc[getBlockIdx(1)]))),&S[getBlockIdx(14)]);
set2bit(15,SIGN( + getDecValue(get2bit(2,Sc[getBlockIdx(2)])) - (getDecValue(get2bit(4,Sc[getBlockIdx(4)])))),&S[getBlockIdx(15)]);
set2bit(16,SIGN( - (getDecValue(get2bit(12,Sc[getBlockIdx(12)]))) + getDecValue(get2bit(14,Sc[getBlockIdx(14)])) + 1),&S[getBlockIdx(16)]);
set2bit(17,SIGN( + getDecValue(get2bit(16,Sc[getBlockIdx(16)])) + getDecValue(get2bit(78,Sc[getBlockIdx(78)]))),&S[getBlockIdx(17)]);
set2bit(18,SIGN( + getDecValue(get2bit(17,Sc[getBlockIdx(17)])) - (getDecValue(get2bit(7,Sc[getBlockIdx(7)]))) - (getDecValue(get2bit(92,Sc[getBlockIdx(92)]))) + 1),&S[getBlockIdx(18)]);
set2bit(19,SIGN( + getDecValue(get2bit(18,Sc[getBlockIdx(18)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(19)]);
set2bit(20,SIGN( + getDecValue(get2bit(13,Sc[getBlockIdx(13)])) + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(33,Sc[getBlockIdx(33)])) - (getDecValue(get2bit(37,Sc[getBlockIdx(37)]))) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + getDecValue(get2bit(82,Sc[getBlockIdx(82)]))),&S[getBlockIdx(20)]);
set2bit(21,SIGN( + getDecValue(get2bit(18,Sc[getBlockIdx(18)])) + getDecValue(get2bit(20,Sc[getBlockIdx(20)])) + getDecValue(get2bit(20,Sc[getBlockIdx(20)])) - (getDecValue(get2bit(76,Sc[getBlockIdx(76)]))) + getDecValue(get2bit(95,Sc[getBlockIdx(95)])) + (-1)),&S[getBlockIdx(21)]);
set2bit(22,SIGN( + getDecValue(get2bit(13,Sc[getBlockIdx(13)])) + getDecValue(get2bit(16,Sc[getBlockIdx(16)]))),&S[getBlockIdx(22)]);
set2bit(23,SIGN( + getDecValue(get2bit(22,Sc[getBlockIdx(22)]))),&S[getBlockIdx(23)]);
set2bit(24,SIGN( + getDecValue(get2bit(19,Sc[getBlockIdx(19)])) + getDecValue(get2bit(23,Sc[getBlockIdx(23)]))),&S[getBlockIdx(24)]);
set2bit(25,SIGN( + getDecValue(get2bit(18,Sc[getBlockIdx(18)])) + getDecValue(get2bit(19,Sc[getBlockIdx(19)])) + (-1)),&S[getBlockIdx(25)]);
set2bit(26,SIGN( - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + getDecValue(get2bit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(26)]);
set2bit(27,SIGN( + getDecValue(get2bit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(27)]);
set2bit(28,SIGN( + getDecValue(get2bit(7,Sc[getBlockIdx(7)])) + 1),&S[getBlockIdx(28)]);
set2bit(29,SIGN( - (getDecValue(get2bit(24,Sc[getBlockIdx(24)]))) - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) - (getDecValue(get2bit(27,Sc[getBlockIdx(27)]))) - (getDecValue(get2bit(33,Sc[getBlockIdx(33)]))) + 3),&S[getBlockIdx(29)]);
set2bit(30,SIGN( + getDecValue(get2bit(28,Sc[getBlockIdx(28)])) + getDecValue(get2bit(29,Sc[getBlockIdx(29)])) + (-1)),&S[getBlockIdx(30)]);
set2bit(31,SIGN( - (getDecValue(get2bit(30,Sc[getBlockIdx(30)]))) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + 1),&S[getBlockIdx(31)]);
set2bit(32,SIGN( - (getDecValue(get2bit(10,Sc[getBlockIdx(10)]))) + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(84,Sc[getBlockIdx(84)]))),&S[getBlockIdx(32)]);
set2bit(33,SIGN( + getDecValue(get2bit(15,Sc[getBlockIdx(15)])) + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(42,Sc[getBlockIdx(42)])) - (getDecValue(get2bit(72,Sc[getBlockIdx(72)]))) + (-1)),&S[getBlockIdx(33)]);
set2bit(34,SIGN( + getDecValue(get2bit(4,Sc[getBlockIdx(4)])) + getDecValue(get2bit(33,Sc[getBlockIdx(33)])) - (getDecValue(get2bit(36,Sc[getBlockIdx(36)]))) - (getDecValue(get2bit(36,Sc[getBlockIdx(36)]))) - (getDecValue(get2bit(37,Sc[getBlockIdx(37)]))) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(69,Sc[getBlockIdx(69)]))) + 2),&S[getBlockIdx(34)]);
set2bit(35,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(35)]);
set2bit(36,SIGN( - (getDecValue(get2bit(4,Sc[getBlockIdx(4)]))) - (getDecValue(get2bit(71,Sc[getBlockIdx(71)]))) + 1),&S[getBlockIdx(36)]);
set2bit(37,SIGN( - (getDecValue(get2bit(4,Sc[getBlockIdx(4)]))) + getDecValue(get2bit(71,Sc[getBlockIdx(71)])) + 1),&S[getBlockIdx(37)]);
set2bit(38,SIGN( - (getDecValue(get2bit(8,Sc[getBlockIdx(8)]))) + getDecValue(get2bit(39,Sc[getBlockIdx(39)])) + getDecValue(get2bit(6,Sc[getBlockIdx(6)])) - (getDecValue(get2bit(40,Sc[getBlockIdx(40)]))) - (getDecValue(get2bit(63,Sc[getBlockIdx(63)]))) + (-1)),&S[getBlockIdx(38)]);
set2bit(39,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(23,Sc[getBlockIdx(23)])) - (getDecValue(get2bit(34,Sc[getBlockIdx(34)]))) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + getDecValue(get2bit(70,Sc[getBlockIdx(70)])) + getDecValue(get2bit(84,Sc[getBlockIdx(84)])) + getDecValue(get2bit(5,Sc[getBlockIdx(5)])) + (-1)),&S[getBlockIdx(39)]);
set2bit(40,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(40)]);
set2bit(41,SIGN( - (getDecValue(get2bit(22,Sc[getBlockIdx(22)]))) - (getDecValue(get2bit(23,Sc[getBlockIdx(23)]))) - (getDecValue(get2bit(24,Sc[getBlockIdx(24)]))) - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + getDecValue(get2bit(72,Sc[getBlockIdx(72)])) + 1),&S[getBlockIdx(41)]);
set2bit(42,SIGN( - (getDecValue(get2bit(41,Sc[getBlockIdx(41)]))) + 1),&S[getBlockIdx(42)]);
set2bit(43,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) - (getDecValue(get2bit(44,Sc[getBlockIdx(44)]))) - (getDecValue(get2bit(60,Sc[getBlockIdx(60)]))) + getDecValue(get2bit(88,Sc[getBlockIdx(88)])) + 1),&S[getBlockIdx(43)]);
set2bit(44,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(45,Sc[getBlockIdx(45)]))) - (getDecValue(get2bit(46,Sc[getBlockIdx(46)])))),&S[getBlockIdx(44)]);
set2bit(45,SIGN( - (getDecValue(get2bit(34,Sc[getBlockIdx(34)]))) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) - (getDecValue(get2bit(44,Sc[getBlockIdx(44)]))) + getDecValue(get2bit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(45)]);
set2bit(46,SIGN( - (getDecValue(get2bit(22,Sc[getBlockIdx(22)]))) - (getDecValue(get2bit(24,Sc[getBlockIdx(24)]))) - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) - (getDecValue(get2bit(34,Sc[getBlockIdx(34)]))) + 1),&S[getBlockIdx(46)]);
set2bit(47,SIGN( - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(46,Sc[getBlockIdx(46)]))) + 1),&S[getBlockIdx(47)]);
set2bit(48,SIGN( - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) - (getDecValue(get2bit(53,Sc[getBlockIdx(53)]))) - (getDecValue(get2bit(54,Sc[getBlockIdx(54)]))) - (getDecValue(get2bit(60,Sc[getBlockIdx(60)]))) + 2),&S[getBlockIdx(48)]);
set2bit(49,SIGN( - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + 1),&S[getBlockIdx(49)]);
set2bit(50,SIGN( + getDecValue(get2bit(16,Sc[getBlockIdx(16)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + (-3)),&S[getBlockIdx(50)]);
set2bit(51,SIGN( + getDecValue(get2bit(51,Sc[getBlockIdx(51)])) - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) - (getDecValue(get2bit(56,Sc[getBlockIdx(56)]))) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)]))) + getDecValue(get2bit(80,Sc[getBlockIdx(80)])) + getDecValue(get2bit(81,Sc[getBlockIdx(81)]))),&S[getBlockIdx(51)]);
set2bit(52,SIGN( - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(55,Sc[getBlockIdx(55)]))) - (getDecValue(get2bit(56,Sc[getBlockIdx(56)]))) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)]))) + 1),&S[getBlockIdx(52)]);
set2bit(53,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) - (getDecValue(get2bit(29,Sc[getBlockIdx(29)]))) - (getDecValue(get2bit(29,Sc[getBlockIdx(29)]))) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)]))) - (getDecValue(get2bit(64,Sc[getBlockIdx(64)]))) - (getDecValue(get2bit(69,Sc[getBlockIdx(69)]))) + getDecValue(get2bit(70,Sc[getBlockIdx(70)])) + getDecValue(get2bit(84,Sc[getBlockIdx(84)])) + getDecValue(get2bit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(53)]);
set2bit(54,SIGN( - (getDecValue(get2bit(48,Sc[getBlockIdx(48)]))) + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(58,Sc[getBlockIdx(58)]))) - (getDecValue(get2bit(59,Sc[getBlockIdx(59)])))),&S[getBlockIdx(54)]);
set2bit(55,SIGN( - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) + getDecValue(get2bit(56,Sc[getBlockIdx(56)])) + 1),&S[getBlockIdx(55)]);
set2bit(56,SIGN( + getDecValue(get2bit(52,Sc[getBlockIdx(52)])) - (getDecValue(get2bit(55,Sc[getBlockIdx(55)])))),&S[getBlockIdx(56)]);
set2bit(57,SIGN( + getDecValue(get2bit(51,Sc[getBlockIdx(51)])) + getDecValue(get2bit(52,Sc[getBlockIdx(52)])) - (getDecValue(get2bit(55,Sc[getBlockIdx(55)]))) + getDecValue(get2bit(56,Sc[getBlockIdx(56)])) + getDecValue(get2bit(57,Sc[getBlockIdx(57)]))),&S[getBlockIdx(57)]);
set2bit(58,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) - (getDecValue(get2bit(38,Sc[getBlockIdx(38)]))) - (getDecValue(get2bit(51,Sc[getBlockIdx(51)]))) - (getDecValue(get2bit(52,Sc[getBlockIdx(52)]))) - (getDecValue(get2bit(53,Sc[getBlockIdx(53)]))) + getDecValue(get2bit(62,Sc[getBlockIdx(62)])) + 1),&S[getBlockIdx(58)]);
set2bit(59,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) - (getDecValue(get2bit(38,Sc[getBlockIdx(38)]))) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + getDecValue(get2bit(62,Sc[getBlockIdx(62)])) - (getDecValue(get2bit(78,Sc[getBlockIdx(78)]))) + 1),&S[getBlockIdx(59)]);
set2bit(60,SIGN( + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) - (getDecValue(get2bit(50,Sc[getBlockIdx(50)]))) - (getDecValue(get2bit(87,Sc[getBlockIdx(87)]))) + 1),&S[getBlockIdx(60)]);
set2bit(61,SIGN( + getDecValue(get2bit(3,Sc[getBlockIdx(3)])) + getDecValue(get2bit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(61)]);
set2bit(62,SIGN( + getDecValue(get2bit(61,Sc[getBlockIdx(61)])) + getDecValue(get2bit(85,Sc[getBlockIdx(85)])) + (-1)),&S[getBlockIdx(62)]);
set2bit(63,SIGN( + getDecValue(get2bit(61,Sc[getBlockIdx(61)]))),&S[getBlockIdx(63)]);
set2bit(64,SIGN( + getDecValue(get2bit(62,Sc[getBlockIdx(62)])) + getDecValue(get2bit(85,Sc[getBlockIdx(85)]))),&S[getBlockIdx(64)]);
set2bit(65,SIGN( + getDecValue(get2bit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(65)]);
set2bit(66,SIGN( + getDecValue(get2bit(65,Sc[getBlockIdx(65)]))),&S[getBlockIdx(66)]);
set2bit(67,SIGN( + getDecValue(get2bit(66,Sc[getBlockIdx(66)]))),&S[getBlockIdx(67)]);
set2bit(68,SIGN( + getDecValue(get2bit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(68)]);
set2bit(69,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + 2),&S[getBlockIdx(69)]);
set2bit(70,SIGN( + getDecValue(get2bit(23,Sc[getBlockIdx(23)])) + getDecValue(get2bit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(70)]);
set2bit(71,SIGN( - (getDecValue(get2bit(35,Sc[getBlockIdx(35)]))) - (getDecValue(get2bit(83,Sc[getBlockIdx(83)])))),&S[getBlockIdx(71)]);
set2bit(72,SIGN( - (getDecValue(get2bit(1,Sc[getBlockIdx(1)]))) + getDecValue(get2bit(11,Sc[getBlockIdx(11)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(87,Sc[getBlockIdx(87)])) + 1),&S[getBlockIdx(72)]);
set2bit(73,SIGN( - (getDecValue(get2bit(25,Sc[getBlockIdx(25)]))) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) - (getDecValue(get2bit(44,Sc[getBlockIdx(44)]))) + getDecValue(get2bit(45,Sc[getBlockIdx(45)])) - (getDecValue(get2bit(47,Sc[getBlockIdx(47)]))) + getDecValue(get2bit(66,Sc[getBlockIdx(66)])) + getDecValue(get2bit(67,Sc[getBlockIdx(67)]))),&S[getBlockIdx(73)]);
set2bit(74,SIGN( + getDecValue(get2bit(73,Sc[getBlockIdx(73)]))),&S[getBlockIdx(74)]);
set2bit(75,SIGN( + getDecValue(get2bit(66,Sc[getBlockIdx(66)])) + getDecValue(get2bit(74,Sc[getBlockIdx(74)]))),&S[getBlockIdx(75)]);
set2bit(76,SIGN( - (getDecValue(get2bit(21,Sc[getBlockIdx(21)]))) - (getDecValue(get2bit(32,Sc[getBlockIdx(32)]))) - (getDecValue(get2bit(95,Sc[getBlockIdx(95)]))) + 3),&S[getBlockIdx(76)]);
set2bit(77,SIGN( + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + (-1)),&S[getBlockIdx(77)]);
set2bit(78,SIGN( + getDecValue(get2bit(12,Sc[getBlockIdx(12)])) + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(25,Sc[getBlockIdx(25)])) + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) - (getDecValue(get2bit(62,Sc[getBlockIdx(62)]))) - (getDecValue(get2bit(91,Sc[getBlockIdx(91)]))) + (-4)),&S[getBlockIdx(78)]);
set2bit(79,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(79)]);
set2bit(80,SIGN( + getDecValue(get2bit(49,Sc[getBlockIdx(49)])) + getDecValue(get2bit(54,Sc[getBlockIdx(54)])) + (-1)),&S[getBlockIdx(80)]);
set2bit(81,SIGN( + getDecValue(get2bit(55,Sc[getBlockIdx(55)])) + getDecValue(get2bit(57,Sc[getBlockIdx(57)])) + (-1)),&S[getBlockIdx(81)]);
set2bit(82,SIGN( + getDecValue(get2bit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(82)]);
set2bit(83,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + getDecValue(get2bit(59,Sc[getBlockIdx(59)]))),&S[getBlockIdx(83)]);
set2bit(84,SIGN( + getDecValue(get2bit(31,Sc[getBlockIdx(31)])) - (getDecValue(get2bit(82,Sc[getBlockIdx(82)])))),&S[getBlockIdx(84)]);
set2bit(85,SIGN( - (getDecValue(get2bit(38,Sc[getBlockIdx(38)]))) + 1),&S[getBlockIdx(85)]);
set2bit(86,SIGN( + getDecValue(get2bit(19,Sc[getBlockIdx(19)])) + getDecValue(get2bit(33,Sc[getBlockIdx(33)]))),&S[getBlockIdx(86)]);
set2bit(87,SIGN( + getDecValue(get2bit(9,Sc[getBlockIdx(9)]))),&S[getBlockIdx(87)]);
set2bit(88,SIGN( + getDecValue(get2bit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(88)]);
set2bit(89,SIGN( + getDecValue(get2bit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(89)]);
set2bit(90,SIGN( + getDecValue(get2bit(24,Sc[getBlockIdx(24)])) + getDecValue(get2bit(86,Sc[getBlockIdx(86)]))),&S[getBlockIdx(90)]);
set2bit(91,SIGN( - (getDecValue(get2bit(90,Sc[getBlockIdx(90)]))) + 1),&S[getBlockIdx(91)]);
set2bit(92,SIGN( + getDecValue(get2bit(7,Sc[getBlockIdx(7)])) + getDecValue(get2bit(43,Sc[getBlockIdx(43)])) + (-1)),&S[getBlockIdx(92)]);
set2bit(93,SIGN( + getDecValue(get2bit(34,Sc[getBlockIdx(34)])) + getDecValue(get2bit(38,Sc[getBlockIdx(38)])) + (-1)),&S[getBlockIdx(93)]);
set2bit(94,SIGN( + getDecValue(get2bit(93,Sc[getBlockIdx(93)]))),&S[getBlockIdx(94)]);
set2bit(95,SIGN( + getDecValue(get2bit(21,Sc[getBlockIdx(21)])) - (getDecValue(get2bit(29,Sc[getBlockIdx(29)]))) - (getDecValue(get2bit(43,Sc[getBlockIdx(43)]))) + getDecValue(get2bit(61,Sc[getBlockIdx(61)])) + (-1)),&S[getBlockIdx(95)]);
}
__global__
void findAttractor(uint64 *attractors, uint32_t *transients, uint32_t *periods, uint32_t numThreads){
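// Each thread exhaustively simulates its share of initial states: the [begin, end)
// slice computed below splits the NUM_STATES starting states as evenly as possible
// across NUM_COPYS workers. For every state it records the transient length, the
// attractor period, and the attractor itself.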
int transient = 0, period = 0;
uint64 S0[N],S1[N];
uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t step = NUM_STATES / NUM_COPYS;
uint32_t rest = NUM_STATES % NUM_COPYS;
uint32_t begin = 0;
uint32_t end = step - 1;
bool flag = true;
if(thread < numThreads){
if(rest > 0){
end = end + 1;
rest = rest - 1;
}else{
flag = false;
}
for(uint32_t i = 0; i < NUM_COPYS;i++){
if(i == thread) break;
if(rest > 0){
end = end + 1;
begin = begin + 1;
rest = rest - 1;
}
else if(rest == 0 && flag){
begin = begin + 1;
flag = false;
}
begin += step;
end += step;
}
for (int i = begin; i < end; ++i) {
S0[0] = S1[0] = thread;
//S0[1] = S1[1] = ;// initialize with rand
//S0[2] = S1[2] = ;// initialize with rand
set2bit(6,1,&S0[getBlockIdx(6)]); // Mandatory **check that the correct bit is being set**
set2bit(7,1,&S1[getBlockIdx(7)]); // Mandatory **check that the correct bit is being set**
transient = 0;
period = 0;
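// Floyd-style cycle detection: S0 (the hare) takes two network updates per
// iteration and S1 (the tortoise) takes one, until the two states coincide;
// `transient` counts those iterations. The second loop then advances S0 alone
// to measure the cycle length.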
do{
pass(S0);
pass(S0);
pass(S1);
transient++;
}while(!comp(S0,S1));
do{
pass(S0);
period++;
}while (!comp(S0,S1));
period--;
transients[i] = transient;
periods[i] = period;
for(int s = 0; s < N; s++){
attractors[i * N + s] = S0[s];
}
//cout << transient << " " << period << std::endl;
}
return;
}
int main() {
}
|
4b4a9b776b2b5c1c4535e3525fd6b07a00879420.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
/* derivative of helva, from Mathematica */
__device__ hipComplex helvp(hipComplex z)
{
hipComplex out(jnf(2,z.r),jnf(1,z.i));
return out;
}
__device__ hipComplex harva(hipComplex z)
{
hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i));
return out;
}
__device__ hipComplex herve(hipComplex z)
{
hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r));
return out;
}
__device__ hipComplex alver(hipComplex z)
{
hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ hipComplex alvir(hipComplex z)
{
hipComplex out(j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ hipComplex hexva(int m, hipComplex z)
{
hipComplex out(jnf(m,z.r),jnf(m,z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex ahilv(hipComplex z)
{
hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex aciwa(hipComplex z)
{
hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thy(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex origo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(ahilv(q*z),ahilv(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale =10;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
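// Map pixel (c, r) to the complex point q = fx + i*fy inside the square
// [-scale, scale] x [-scale, scale], with the imaginary axis pointing up.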
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(0.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex vue = cue;
hipComplex rhuva(3.0,0.0);
hipComplex rarva(3.0,0.0);
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<15;v++)
{
/* stripes all over the place */
/*cue =cue -aon*ahilv(hilva(cue))-uon*hilva(ahilv(cue))/(uon*ahilv(hilva(q))-aon*ai*hilva(ahilv(cue)));*/
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
cue = cue - arago(alvir(cue),flat(uon*hilva(cue-aon*helva(cue))/helva(cue+aon*ai*hilva(cue)))*aon/alvir(cue));
accume = accume + uon*alvir(cue-aon*alvir(cue));
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | 4b4a9b776b2b5c1c4535e3525fd6b07a00879420.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
/* derivative of helva, from Mathematica */
__device__ cuComplex helvp(cuComplex z)
{
cuComplex out(jnf(2,z.r),jnf(1,z.i));
return out;
}
__device__ cuComplex harva(cuComplex z)
{
cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i));
return out;
}
__device__ cuComplex herve(cuComplex z)
{
cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r));
return out;
}
__device__ cuComplex alver(cuComplex z)
{
cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ cuComplex alvir(cuComplex z)
{
cuComplex out(j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ cuComplex hexva(int m, cuComplex z)
{
cuComplex out(jnf(m,z.r),jnf(m,z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex ahilv(cuComplex z)
{
cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex aciwa(cuComplex z)
{
cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thy(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex origo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(ahilv(q*z),ahilv(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale =10;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
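// Map pixel (c, r) to the complex point q = fx + i*fy inside the square
// [-scale, scale] x [-scale, scale], with the imaginary axis pointing up.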
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(0.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex vue = cue;
cuComplex rhuva(3.0,0.0);
cuComplex rarva(3.0,0.0);
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<15;v++)
{
/* stripes all over the place */
/*cue =cue -aon*ahilv(hilva(cue))-uon*hilva(ahilv(cue))/(uon*ahilv(hilva(q))-aon*ai*hilva(ahilv(cue)));*/
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
cue = cue - arago(alvir(cue),flat(uon*hilva(cue-aon*helva(cue))/helva(cue+aon*ai*hilva(cue)))*aon/alvir(cue));
accume = accume + uon*alvir(cue-aon*alvir(cue));
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
04b340ca337a7ff390c314bb4031e7fb2a437e5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void backwardMaxPoolingKernel (
int batchSize,
int* lengths,
float symbolForUnusedColumns,
int numberEntriesPerInstance,
int numberRows,
int* maxIndices,
float* chain,
float* result) {
int indexInstance = blockIdx.x;
int indexRow = blockIdx.y;
int indexColumn = threadIdx.x;
int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance;
int startColumnWithinInstance = indexColumn * numberRows;
int indexEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + indexRow;
if(indexInstance < batchSize) {
int length = lengths[indexInstance];
if(indexColumn < length) {
int maxIndex = maxIndices[indexInstance * numberRows + indexRow];
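// Only the entry that produced the maximum receives the upstream gradient;
// every other entry of a valid column is set to zero.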
if(indexEntryWithinBatch == maxIndex) {
result[indexEntryWithinBatch] = chain[indexInstance * numberRows + indexRow];
}
else {
result[indexEntryWithinBatch] = 0.0;
}
}
else {
result[indexEntryWithinBatch] = symbolForUnusedColumns;
}
}
else {
result[indexEntryWithinBatch] = symbolForUnusedColumns;
}
} | 04b340ca337a7ff390c314bb4031e7fb2a437e5c.cu | __global__ void backwardMaxPoolingKernel (
int batchSize,
int* lengths,
float symbolForUnusedColumns,
int numberEntriesPerInstance,
int numberRows,
int* maxIndices,
float* chain,
float* result) {
int indexInstance = blockIdx.x;
int indexRow = blockIdx.y;
int indexColumn = threadIdx.x;
int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance;
int startColumnWithinInstance = indexColumn * numberRows;
int indexEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + indexRow;
if(indexInstance < batchSize) {
int length = lengths[indexInstance];
if(indexColumn < length) {
int maxIndex = maxIndices[indexInstance * numberRows + indexRow];
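// Only the entry that produced the maximum receives the upstream gradient;
// every other entry of a valid column is set to zero.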
if(indexEntryWithinBatch == maxIndex) {
result[indexEntryWithinBatch] = chain[indexInstance * numberRows + indexRow];
}
else {
result[indexEntryWithinBatch] = 0.0;
}
}
else {
result[indexEntryWithinBatch] = symbolForUnusedColumns;
}
}
else {
result[indexEntryWithinBatch] = symbolForUnusedColumns;
}
} |
2978a6fd1f38664039ce67d57621198c0829d0ed.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#define BLOCKSIZE 1024
// NOTE: if a literal constant such as 1. or 2. is used, it must be wrapped as scalar_t(1.) or scalar_t(2.), otherwise the value will be cast to double.
// kernel function for forward and backward
template<typename scalar_t>
__global__ void SwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
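// swish(x) = x * sigmoid(x) = x / (1 + exp(-x))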
activations[i] = val / (one + expf(-val));
}
}
template<typename scalar_t>
__global__ void SwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
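// d/dx [x*sigmoid(x)] = sigmoid(x)*(1 + x*(1 - sigmoid(x))),
// written here as (1 + x/(1+exp(x))) / (1 + exp(-x)).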
grad_feat[i] = (one + val / (one + expf(val))) / (one + expf(-val));
grad_feat[i] *= grad[i];
}
}
// cuda forward and backward
at::Tensor Swish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.type().is_cuda(), "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(hipGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "swish forward", [&] {
hipLaunchKernelGGL(( SwishForward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data<scalar_t>(),
activations.contiguous().data<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return activations;
}
at::Tensor Swish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.type().is_cuda(), "grad should be cuda");
AT_ASSERTM(feat.type().is_cuda(), "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
// THCCeilDiv(num_samples, BLOCKSIZE), 4096
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "swish backward", [&] {
hipLaunchKernelGGL(( SwishBackward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data<scalar_t>(),
grad.contiguous().data<scalar_t>(),
grad_feat.contiguous().data<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return grad_feat;
}
// python interface
at::Tensor Swish_forward(const at::Tensor &feat) {
if (!feat.type().is_cuda()) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_forward_cuda(feat);
}
at::Tensor Swish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (!feat.type().is_cuda()) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_backward_cuda(grad, feat);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("swish_forward", &Swish_forward, "swish forward");
m.def("swish_backward", &Swish_backward, "swish backward");
}
| 2978a6fd1f38664039ce67d57621198c0829d0ed.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#define BLOCKSIZE 1024
// NOTE: If a numeric constant such as 1. or 2. is used, wrap it as scalar_t(1.) or scalar_t(2.); otherwise the value will be cast to double.
// kernel function for forward and backward
template<typename scalar_t>
__global__ void SwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
activations[i] = val / (one + expf(-val));
}
}
template<typename scalar_t>
__global__ void SwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
grad_feat[i] = (one + val / (one + expf(val))) / (one + expf(-val));
grad_feat[i] *= grad[i];
}
}
// cuda forward and backward
at::Tensor Swish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.type().is_cuda(), "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(cudaGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "swish forward", [&] {
SwishForward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data<scalar_t>(),
activations.contiguous().data<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return activations;
}
at::Tensor Swish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.type().is_cuda(), "grad should be cuda");
AT_ASSERTM(feat.type().is_cuda(), "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
// THCCeilDiv(num_samples, BLOCKSIZE), 4096
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "swish backward", [&] {
SwishBackward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data<scalar_t>(),
grad.contiguous().data<scalar_t>(),
grad_feat.contiguous().data<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// python interface
at::Tensor Swish_forward(const at::Tensor &feat) {
if (!feat.type().is_cuda()) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_forward_cuda(feat);
}
at::Tensor Swish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (!feat.type().is_cuda()) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_backward_cuda(grad, feat);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("swish_forward", &Swish_forward, "swish forward");
m.def("swish_backward", &Swish_backward, "swish backward");
}
|
75999e2ab200e6a429126b3e253b27a625d069f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void set_with_value_util_kernel( double2 * __restrict buf, double v, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
double2 val;
val.x = v;
val.y = v;
buf[elem_id] = val;
}
} | 75999e2ab200e6a429126b3e253b27a625d069f6.cu | #include "includes.h"
__global__ void set_with_value_util_kernel( double2 * __restrict buf, double v, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
double2 val;
val.x = v;
val.y = v;
buf[elem_id] = val;
}
} |
989326aa598ef2a4d1e9b1ffc396ca1748cdd652.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void binaryTestingKernel (
int batchStart,
int length,
float* predictions,
float* targets,
int* result) {
int withinBatch = blockIdx.x;
int instanceStart = batchStart + withinBatch * length;
int instanceEnd = instanceStart + length;
for(int indexEntry = instanceStart; indexEntry < instanceEnd; indexEntry++) {
float prediction = predictions[indexEntry];
float target = targets[indexEntry];
result[indexEntry] = (prediction < 0.5 && target == 0.0) || (prediction >= 0.5 && target == 1.0);
}
} | 989326aa598ef2a4d1e9b1ffc396ca1748cdd652.cu | __global__ void binaryTestingKernel (
int batchStart,
int length,
float* predictions,
float* targets,
int* result) {
int withinBatch = blockIdx.x;
int instanceStart = batchStart + withinBatch * length;
int instanceEnd = instanceStart + length;
for(int indexEntry = instanceStart; indexEntry < instanceEnd; indexEntry++) {
float prediction = predictions[indexEntry];
float target = targets[indexEntry];
result[indexEntry] = (prediction < 0.5 && target == 0.0) || (prediction >= 0.5 && target == 1.0);
}
} |
31c56385eca83ea3ea3a9b7c28cd00c45c83eedb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz,m,xyz2,result,result_i);
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,m,xyz2,n,xyz,result2,result2_i);
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){
hipMemset(grad_xyz1,0,b*n*3*4);
hipMemset(grad_xyz2,0,b*m*3*4);
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
}
#endif
| 31c56385eca83ea3ea3a9b7c28cd00c45c83eedb.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
NmDistanceKernel<<<dim3(32,16,1),512>>>(b,n,xyz,m,xyz2,result,result_i);
NmDistanceKernel<<<dim3(32,16,1),512>>>(b,m,xyz2,n,xyz,result2,result2_i);
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){
cudaMemset(grad_xyz1,0,b*n*3*4);
cudaMemset(grad_xyz2,0,b*m*3*4);
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
}
#endif
|
566dea324388f7ab66e7a948b5402362aefa3387.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
// Includes CUDA
//#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//
#include <memory.h>
#include "device_launch_parameters.h"
#include <hipfft.h>
//#include <cufftw.h> // ** cuFFT also comes with CPU-version FFTW, but seems not to work when image size is large.
#include "fftw3.h"
#include "cukernel.cuh"
extern "C" {
#include "powell.h"
}
#include "apifunc_internal.h"
#define SMALLVALUE 0.01
#define NDIM 12
hipError_t cudaStatus;
#define cudaCheckErrors(msg) \
do { \
cudaStatus = hipGetLastError(); \
if (cudaStatus != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(cudaStatus), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
extern "C"
bool isPow2(int x)
{
return ((x&(x - 1)) == 0);
};
//Round a / b to nearest higher integer value
inline long long int iDivUp(long long int a, long long int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
inline long long int iAlignUp(long long int a, long long int b)
{
return (a % b != 0) ? (a - a % b + b) : a;
}
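// Rounds an FFT dimension up to an efficient size: after aligning to a multiple of 16,
// exact powers of two are kept, smaller sizes snap to the next power of two (up to 128),
// and anything larger is aligned up to a multiple of 64.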
int snapTransformSize(int dataSize)//
{
int hiBit;
unsigned int lowPOT, hiPOT;
dataSize = iAlignUp(dataSize, 16);
for (hiBit = 31; hiBit >= 0; hiBit--)
if (dataSize & (1U << hiBit))
{
break;
}
lowPOT = 1U << hiBit;
if (lowPOT == (unsigned int)dataSize)
{
return dataSize;
}
hiPOT = 1U << (hiBit + 1);
if (hiPOT <= 128)
{
return hiPOT;
}
else
{
return iAlignUp(dataSize, 64);
}
}
//////////////// Basic math functions /////////////////
// CPU functions
// sum
template <class T>
double sumcpu(T *h_idata, size_t totalSize) {
double sumValue = 0;
for (size_t i = 0; i < totalSize; i++) {
sumValue += (double)h_idata[i];
}
return sumValue;
}
template double sumcpu<int>(int *h_idata, size_t totalSize);
template double sumcpu<float>(float *h_idata, size_t totalSize);
template double sumcpu<double>(double *h_idata, size_t totalSize);
// add
template <class T>
void addcpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] + h_idata2[i];
}
template void addcpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void addcpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void addcpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
template <class T>
void addvaluecpu(T *h_odata, T *h_idata1, T h_idata2, size_t totalSize){
const T b = h_idata2;
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] + b;
}
template void addvaluecpu<int>(int *h_odata, int *h_idata1, int h_idata2, size_t totalSize);
template void addvaluecpu<float>(float *h_odata, float *h_idata1, float h_idata2, size_t totalSize);
template void addvaluecpu<double>(double *h_odata, double *h_idata1, double h_idata2, size_t totalSize);
// subtract
template <class T>
void subcpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] - h_idata2[i];
}
template void subcpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void subcpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void subcpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
// multiply
template <class T>
void multicpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] * h_idata2[i];
}
template void multicpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void multicpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void multicpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
//divide
template <class T>
void divcpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] / h_idata2[i];
}
template void divcpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void divcpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void divcpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
template <class T>
void multivaluecpu(T *h_odata, T *h_idata1, T h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] * h_idata2;
}
template void multivaluecpu<int>(int *h_odata, int *h_idata1, int h_idata2, size_t totalSize);
template void multivaluecpu<float>(float *h_odata, float *h_idata1, float h_idata2, size_t totalSize);
template void multivaluecpu<double>(double *h_odata, double *h_idata1, double h_idata2, size_t totalSize);
extern "C"
void multicomplexcpu(fComplex *h_odata, fComplex *h_idata1, fComplex *h_idata2, size_t totalSize){
fComplex a;
fComplex b;
for (size_t i = 0; i < totalSize; i++){
a = h_idata1[i];
b = h_idata2[i];
h_odata[i].x = a.x*b.x - a.y*b.y;
h_odata[i].y = a.x*b.y + a.y*b.x;
}
}
// max3Dcpu: find max value and coordinates
template <class T>
T max3Dcpu(size_t *corXYZ, T *h_idata, size_t sx, size_t sy, size_t sz) {
T peakValue = h_idata[0];
T t;
size_t sx0 = 0, sy0 = 0, sz0 = 0;
for (size_t i = 0; i < sx; i++) {
for (size_t j = 0; j < sy; j++) {
for (size_t k = 0; k < sz; k++) {
t = h_idata[i + j * sx + k * sx * sy];
if (peakValue < t) {
peakValue = t;
sx0 = i;
sy0 = j;
sz0 = k;
}
}
}
}
corXYZ[0] = sx0; corXYZ[1] = sy0; corXYZ[2] = sz0;
return peakValue;
}
template int max3Dcpu<int>(size_t *corXYZ, int *h_idata, size_t sx, size_t sy, size_t sz);
template float max3Dcpu<float>(size_t *corXYZ, float *h_idata, size_t sx, size_t sy, size_t sz);
template double max3Dcpu<double>(size_t *corXYZ, double *h_idata, size_t sx, size_t sy, size_t sz);
// max with a single value
template <class T>
void maxvaluecpu(T *h_odata, T *h_idata1, T h_idata2, size_t totalSize) {
T a;
const T b = h_idata2;
for (size_t i = 0; i < totalSize; i++) {
a = h_idata1[i];
h_odata[i] = (a > b) ? a : b;
}
}
template void maxvaluecpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t totalSize);
template void maxvaluecpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t totalSize);
template void maxvaluecpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t totalSize);
template <class T>
void changestorageordercpu(T *h_odata, T *h_idata, size_t sx, size_t sy, size_t sz, int orderMode) {
//orderMode
// 1: change tiff storage order to C storage order
//-1: change C storage order to tiff storage order
if (orderMode == 1) {
for (size_t i = 0; i < sx; i++) {
for (size_t j = 0; j < sy; j++) {
for (size_t k = 0; k < sz; k++) {
h_odata[i*sy*sz + j*sz + k] = h_idata[k*sy*sx + j*sx + i];
}
}
}
}
else if (orderMode == -1) {//change C storage order to tiff storage order:
for (size_t i = 0; i < sx; i++) {
for (size_t j = 0; j < sy; j++) {
for (size_t k = 0; k < sz; k++) {
h_odata[k*sy*sx + j*sx + i] = h_idata[i*sy*sz + j*sz + k];
}
}
}
}
}
template void changestorageordercpu<int>(int *h_odata, int *h_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordercpu<float>(float *h_odata, float *h_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordercpu<double>(double *h_odata, double *h_idata, size_t sx, size_t sy, size_t sz, int orderMode);
///// GPU functions
//add
template <class T>
void add3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( add3Dkernel<T>) , dim3(grids), dim3(threads), 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
template void add3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void add3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void add3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
// add with a single value
template <class T>
void addvaluegpu(T *d_odata, T *d_idata1, T d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( addvaluekernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
template void addvaluegpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t sx, size_t sy, size_t sz);
template void addvaluegpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t sx, size_t sy, size_t sz);
template void addvaluegpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t sx, size_t sy, size_t sz);
//subtract
template <class T>
void sub3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( sub3Dkernel<T>) , dim3(grids), dim3(threads), 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
template void sub3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void sub3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void sub3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
//multiply
template <class T>
void multi3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( multi3Dkernel<T>) , dim3(grids), dim3(threads), 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
template void multi3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void multi3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void multi3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
// multiply with a single value
template <class T>
void multivaluegpu(T *d_odata, T *d_idata1, T d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( multivaluekernel<T>) , dim3(grids), dim3(threads), 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
template void multivaluegpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t sx, size_t sy, size_t sz);
template void multivaluegpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t sx, size_t sy, size_t sz);
template void multivaluegpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t sx, size_t sy, size_t sz);
//multiply float complex
extern "C"
void multicomplex3Dgpu(fComplex *d_odata, fComplex *d_idata1, fComplex *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( multicomplex3Dkernel), dim3(grids), dim3(threads), 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
//multiply float complex and do normalization
extern "C"
void multicomplexnorm3Dgpu(fComplex *d_odata, fComplex *d_idata1, fComplex *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( multicomplexnorm3Dkernel) , dim3(grids), dim3(threads), 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
//multiply double complex
extern "C"
void multidcomplex3Dgpu(dComplex *d_odata, dComplex *d_idata1, dComplex *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( multidcomplex3Dkernel), dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
//divide
template <class T>
void div3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( div3Dkernel<T>) , dim3(grids), dim3(threads), 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
template void div3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void div3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void div3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
//conjugation of complex
extern "C"
void conj3Dgpu(fComplex *d_odata, fComplex *d_idata, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( conj3Dkernel) , dim3(grids), dim3(threads), 0, 0, d_odata, d_idata, sx, sy, sz);
hipDeviceSynchronize();
}
// summation
// sumgpu 1: small data size
template <class T>
T sumgpu(T *d_idata, int totalSize){
int gridSize = iDivUp(totalSize, blockSize);
bool nIsPow2 = isPow2(totalSize);
int smemSize = (blockSize <= 32) ? 2 * blockSize * sizeof(T) : blockSize * sizeof(T);
T *h_temp = NULL, *d_temp = NULL;
h_temp = (T *)malloc(gridSize * sizeof(T));
hipMalloc((void **)&d_temp, gridSize * sizeof(T));
hipLaunchKernelGGL(( sumgpukernel<T>), dim3(gridSize), dim3(blockSize), smemSize, 0,
d_idata,
d_temp,
totalSize,
nIsPow2
);
hipDeviceSynchronize();
hipMemcpy(h_temp, d_temp, gridSize * sizeof(T), hipMemcpyDeviceToHost);
T sumValue = 0;
for (int i = 0; i < gridSize; i++){
sumValue += h_temp[i];
}
free(h_temp);
hipFree(d_temp);
return sumValue;
}
template int sumgpu<int>(int *d_idata, int totalSize);
template float sumgpu<float>(float *d_idata, int totalSize);
template double sumgpu<double>(double *d_idata, int totalSize);
// sumgpu 2: huge data size (3D data)
template <class T>
double sum3Dgpu(T *d_idata, size_t sx, size_t sy, size_t sz){
size_t sxy = sx * sy;
double *h_temp = NULL, *d_temp = NULL;
h_temp = (double *)malloc(sxy * sizeof(double));
hipMalloc((void **)&d_temp, sxy * sizeof(double));
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
hipLaunchKernelGGL(( reduceZ<T>) , dim3(grids), dim3(threads) , 0, 0, d_idata, d_temp, sx, sy, sz);
hipDeviceSynchronize();
hipMemcpy(h_temp, d_temp, sxy * sizeof(double), hipMemcpyDeviceToHost);
double sumValue = 0;
for (size_t i = 0; i < sxy; i++)
sumValue += h_temp[i];
free(h_temp);
hipFree(d_temp);
return sumValue;
}
template double sum3Dgpu<int>(int *d_idata, size_t sx, size_t sy, size_t sz);
template double sum3Dgpu<float>(float *d_idata, size_t sx, size_t sy, size_t sz);
template double sum3Dgpu<double>(double *d_idata, size_t sx, size_t sy, size_t sz);
// sumgpu 3: small data (1D data)
template <class T>
T sumgpu1D(T *d_idata, size_t totalSize){
const size_t r = 5; // a rough number, need further optimization
size_t tempSize = r * blockSize;
T *h_temp = NULL, *d_temp = NULL;
h_temp = (T *)malloc(tempSize * sizeof(T));
hipMalloc((void **)&d_temp, tempSize * sizeof(T));
hipLaunchKernelGGL(( sumgpu1Dkernel<T>) , dim3(r), dim3(blockSize) , 0, 0,
d_idata,
d_temp,
totalSize
);
hipDeviceSynchronize();
hipMemcpy(h_temp, d_temp, tempSize * sizeof(T), hipMemcpyDeviceToHost);
T sumValue = 0;
for (int i = 0; i < tempSize; i++){
sumValue += h_temp[i];
}
free(h_temp);
hipFree(d_temp);
return sumValue;
}
template int sumgpu1D<int>(int *d_idata, size_t totalSize);
template float sumgpu1D<float>(float *d_idata, size_t totalSize);
template double sumgpu1D<double>(double *d_idata, size_t totalSize);
// max3Dgpu: find max value and coordinates
template <class T>
T max3Dgpu(size_t *corXYZ, T *d_idata, size_t sx, size_t sy, size_t sz){
size_t sx0 = 0, sy0 = 0, sz0 = 0;
T *d_temp1 = NULL, *h_temp1 = NULL;
size_t *d_temp2 = NULL, *h_temp2 = NULL;
hipMalloc((void **)&d_temp1, sx*sy *sizeof(T));
hipMalloc((void **)&d_temp2, sx*sy *sizeof(size_t));
h_temp1 = (T *)malloc(sx*sy * sizeof(T));
h_temp2 = (size_t *)malloc(sx*sy * sizeof(size_t));
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
hipLaunchKernelGGL(( maxZkernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_idata, d_temp1, d_temp2, sx, sy, sz);
hipDeviceSynchronize();
hipMemcpy(h_temp1, d_temp1, sx*sy * sizeof(T), hipMemcpyDeviceToHost);
hipMemcpy(h_temp2, d_temp2, sx*sy * sizeof(size_t), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
T peakValue = h_temp1[0];
T t;
for (size_t i = 0; i < sx; i++){
for (size_t j = 0; j < sy; j++){
t = h_temp1[i + j * sx];
if (peakValue < t){
peakValue = t;
sx0 = i;
sy0 = j;
sz0 = h_temp2[i + j * sx];
}
}
}
corXYZ[0] = sx0; corXYZ[1] = sy0; corXYZ[2] = sz0;
free(h_temp1); free(h_temp2);
hipFree(d_temp1); hipFree(d_temp2);
return peakValue;
}
template int max3Dgpu<int>(size_t *corXYZ, int *d_idata, size_t sx, size_t sy, size_t sz);
template float max3Dgpu<float>(size_t *corXYZ, float *d_idata, size_t sx, size_t sy, size_t sz);
template double max3Dgpu<double>(size_t *corXYZ, double *d_idata, size_t sx, size_t sy, size_t sz);
// max with a single value
template <class T>
void maxvalue3Dgpu(T *d_odata, T *d_idata1, T d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( maxvalue3Dgpukernel<T>), dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata1, d_idata2, sx, sy, sz);
hipDeviceSynchronize();
}
template void maxvalue3Dgpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t sx, size_t sy, size_t sz);
template void maxvalue3Dgpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t sx, size_t sy, size_t sz);
template void maxvalue3Dgpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t sx, size_t sy, size_t sz);
// maximum projection
template <class T>
void maxprojection(T *d_odata, T *d_idata, size_t sx, size_t sy, size_t sz, int pDirection){
size_t psx, psy, psz;
if (pDirection == 1){
psx = sx; psy = sy; psz = sz;
}
if (pDirection == 2){
psx = sz; psy = sx; psz = sy;
}
if (pDirection == 3){
psx = sy; psy = sz; psz = sx;
}
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(psx, threads.x), iDivUp(psy, threads.y));
hipLaunchKernelGGL(( maxprojectionkernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata, sx, sy, sz, psx, psy, psz, pDirection);
hipDeviceSynchronize();
}
template void maxprojection<int>(int *d_odata, int *d_idata, size_t sx, size_t sy, size_t sz, int pDirection);
template void maxprojection<float>(float *d_odata, float *d_idata, size_t sx, size_t sy, size_t sz, int pDirection);
template void maxprojection<double>(double *d_odata, double *d_idata, size_t sx, size_t sy, size_t sz, int pDirection);
//Other functions
template <class T>
void changestorageordergpu(T *d_odata, T *d_idata, size_t sx, size_t sy, size_t sz, int orderMode){
//orderMode
// 1: change tiff storage order to C storage order
//-1: change C storage order to tiff storage order
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( changestorageordergpukernel<T>), dim3(grids), dim3(threads), 0, 0, d_odata, d_idata, sx, sy, sz, orderMode);
hipDeviceSynchronize();
}
template void changestorageordergpu<int>(int *d_odata, int *d_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordergpu<float>(float *d_odata, float *d_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordergpu<double>(double *d_odata, double *d_idata, size_t sx, size_t sy, size_t sz, int orderMode);
// rotate 90/-90 degree by axis
template <class T>
void rotbyyaxis(T *d_odata, T *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection){
//rot direction
// 1: rotate 90 deg around Y axis
//-1: rotate -90 deg around Y axis
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( rotbyyaxiskernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata, sx, sy, sz, rotDirection);
hipDeviceSynchronize();
}
template void rotbyyaxis<int>(int *d_odata, int *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection);
template void rotbyyaxis<float>(float *d_odata, float *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection);
template void rotbyyaxis<double>(double *d_odata, double *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection);
/*
// rotate any degree by y axis: matrix for affine transformation
void rot3Dbyyaxis(float *d_odata, float theta, int sx, int sz, int sx2, int sz2){
// Rotation matrix:translation (-sx2/2, -sz2/2) --> rotation--> translation back(sx/2,sy/2)
// 1 0 0 sx / 2 cos(theta) 0 sin(theta) 0 1 0 0 -sx2/2
// 0 1 0 0 * 0 1 0 0 * 0 1 0 0
// 0 0 1 sz / 2 -sin(theta) 0 cos(theta) 0 0 0 1 -sz2/2
// 0 0 0 1 0 0 0 1 0 0 0 1
d_odata[0] = cos(theta); d_odata[1] = 0; d_odata[2] = sin(theta);
d_odata[3] = sx / 2 - sx2 / 2 * cos(theta) - sz2 / 2 * sin(theta);
d_odata[4] = 0; d_odata[5] = 1; d_odata[6] = 0; d_odata[7] = 0;
d_odata[8] = -sin(theta); d_odata[9] = 0; d_odata[10] = cos(theta);
d_odata[11] = sz / 2 + sx2 / 2 * sin(theta) - sz2 / 2 * cos(theta);
}
*/
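// p2matrix / matrix2p convert between the 12-element affine matrix m (row-major 3x4)
// and the 13-element parameter vector x (x[0] unused): x[1..3] hold the translation
// entries m[3], m[7], m[11] and x[4..12] hold the 3x3 linear part.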
void p2matrix(float *m, float *x){
m[0] = x[4], m[1] = x[5], m[2] = x[6], m[3] = x[1];
m[4] = x[7], m[5] = x[8], m[6] = x[9], m[7] = x[2];
m[8] = x[10], m[9] = x[11], m[10] = x[12], m[11] = x[3];
/*
m[0] = x[1], m[1] = x[2], m[2] = x[3], m[3] = x[4];
m[4] = x[5], m[5] = x[6], m[6] = x[7], m[7] = x[8];
m[8] = x[9], m[9] = x[10], m[10] = x[11], m[11] = x[12];
*/
}
void matrix2p(float *m, float *x){
x[0] = 0;
x[1] = m[3], x[2] = m[7], x[3] = m[11], x[4] = m[0];
x[5] = m[1], x[6] = m[2], x[7] = m[4], x[8] = m[5];
x[9] = m[6], x[10] = m[8], x[11] = m[9], x[12] = m[10];
/*
x[1] = m[0], x[2] = m[1], x[3] = m[2], x[4] = m[3];
x[5] = m[4], x[6] = m[5], x[7] = m[6], x[8] = m[7];
x[9] = m[8], x[10] = m[9], x[11] = m[10], x[12] = m[11];
*/
}
extern "C" void matrixmultiply(float * m, float *m1, float *m2){//for transformation matrix calcution only
m[0] = m1[0] * m2[0] + m1[1] * m2[4] + m1[2] * m2[8];
m[1] = m1[0] * m2[1] + m1[1] * m2[5] + m1[2] * m2[9];
m[2] = m1[0] * m2[2] + m1[1] * m2[6] + m1[2] * m2[10];
m[3] = m1[0] * m2[3] + m1[1] * m2[7] + m1[2] * m2[11] + m1[3];
m[4] = m1[4] * m2[0] + m1[5] * m2[4] + m1[6] * m2[8];
m[5] = m1[4] * m2[1] + m1[5] * m2[5] + m1[6] * m2[9];
m[6] = m1[4] * m2[2] + m1[5] * m2[6] + m1[6] * m2[10];
m[7] = m1[4] * m2[3] + m1[5] * m2[7] + m1[6] * m2[11] + m1[7];
m[8] = m1[8] * m2[0] + m1[9] * m2[4] + m1[10] * m2[8];
m[9] = m1[8] * m2[1] + m1[9] * m2[5] + m1[10] * m2[9];
m[10] = m1[8] * m2[2] + m1[9] * m2[6] + m1[10] * m2[10];
m[11] = m1[8] * m2[3] + m1[9] * m2[7] + m1[10] * m2[11] + m1[11];
//**** 12 13 14 15 never change ****
//no need to calculate m[12,13,14,15]:0 0 0 1
/*
m[0] = m1[0] * m2[0] + m1[1] * m2[4] + m1[2] * m2[8] + m1[3] * m2[12];
m[1] = m1[0] * m2[1] + m1[1] * m2[5] + m1[2] * m2[9] + m1[3] * m2[13];
m[2] = m1[0] * m2[2] + m1[1] * m2[6] + m1[2] * m2[10] + m1[3] * m2[14];
m[3] = m1[0] * m2[3] + m1[1] * m2[7] + m1[2] * m2[11] + m1[3] * m2[15];
m[4] = m1[4] * m2[0] + m1[5] * m2[4] + m1[6] * m2[8] + m1[7] * m2[12];
m[5] = m1[4] * m2[1] + m1[5] * m2[5] + m1[6] * m2[9] + m1[7] * m2[13];
m[6] = m1[4] * m2[2] + m1[5] * m2[6] + m1[6] * m2[10] + m1[7] * m2[14];
m[7] = m1[4] * m2[3] + m1[5] * m2[7] + m1[6] * m2[11] + m1[7] * m2[15];
m[8] = m1[8] * m2[0] + m1[9] * m2[4] + m1[10] * m2[8] + m1[11] * m2[12];
m[9] = m1[8] * m2[1] + m1[9] * m2[5] + m1[10] * m2[9] + m1[11] * m2[13];
m[10] = m1[8] * m2[2] + m1[9] * m2[6] + m1[10] * m2[10] + m1[11] * m2[14];
m[11] = m1[8] * m2[3] + m1[9] * m2[7] + m1[10] * m2[11] + m1[11] * m2[15];
m[12] = m1[12] * m2[0] + m1[13] * m2[4] + m1[14] * m2[8] + m1[15] * m2[12];
m[13] = m1[12] * m2[1] + m1[13] * m2[5] + m1[14] * m2[9] + m1[15] * m2[13];
m[14] = m1[12] * m2[2] + m1[13] * m2[6] + m1[14] * m2[10] + m1[15] * m2[14];
m[15] = m1[12] * m2[3] + m1[13] * m2[7] + m1[14] * m2[11] + m1[15] * m2[15];
*/
}
extern "C" void rot2matrix(float * p_out, float theta, long long int sx, long long int sy, long long int sz, int rotAxis){
//p_out: 12 elements
//theta: rotation angle
//sx, sy, sz: images size
////rotAxis
// 1: rotate theta around X axis
// 2: rotate theta around Y axis
// 3: rotate theta around Z axis
long long int sNew;
float *p_temp, *p_temp1, *p_temp2, *p_temp3;
p_temp = (float *)malloc(16 * sizeof(float));
p_temp1 = (float *)malloc(16 * sizeof(float));
p_temp2 = (float *)malloc(16 * sizeof(float));
p_temp3 = (float *)malloc(16 * sizeof(float));
for (int i = 0; i < 15; i++){
p_temp[i] = p_temp1[i] = p_temp2[i] = p_temp3[i] = 0;
}
p_temp[15] = p_temp1[15] = p_temp2[15] = p_temp3[15] = 1; //**** 12 13 14 15 never change ****
// matrix: p_temp1 * p_temp2 * p_temp3
if (rotAxis == 1){//Rotate by x axis
// Rotation matrix: translation (0, -sy2/2, -sz2/2) --> rotation --> translation back (0, sy/2, sz/2)
// 1 0 0 0 1 0 0 0 1 0 0 0
// 0 1 0 sx / 2 * 0 cos(theta) sin(theta) 0 * 0 1 0 -sy2/2
// 0 0 1 sz / 2 0 -sin(theta) cos(theta) 0 0 0 1 -sz2/2
// 0 0 0 1 0 0 0 1 0 0 0 1
p_temp1[0] = p_temp1[5] = p_temp1[10] = 1;
p_temp1[7] = sy / 2; p_temp1[11] = sz / 2;
p_temp2[0] = 1; p_temp2[1] = 0; p_temp2[2] = 0; p_temp2[3] = 0;
p_temp2[4] = 0; p_temp2[5] = cos(theta); p_temp2[6] = sin(theta); p_temp2[7] = 0;
p_temp2[8] = 0; p_temp2[9] = -sin(theta); p_temp2[10] = cos(theta); p_temp2[11] = 0;
sNew = round(sqrt(sy * sy + sz*sz));
p_temp3[0] = p_temp3[5] = p_temp3[10] = 1;
p_temp3[7] = - sNew / 2; p_temp3[11] = - sNew / 2;
}
if (rotAxis == 2){//Rotate by y axis
// Rotation matrix:translation (-sx2/2, 0, -sz2/2) --> rotation--> translation back(sx/2,0,sz/2)
// 1 0 0 sx / 2 cos(theta) 0 -sin(theta) 0 1 0 0 -sx2/2
// 0 1 0 0 * 0 1 0 0 * 0 1 0 0
// 0 0 1 sz / 2 sin(theta) 0 cos(theta) 0 0 0 1 -sz2/2
// 0 0 0 1 0 0 0 1 0 0 0 1
p_temp1[0] = p_temp1[5] = p_temp1[10] = 1;
p_temp1[3] = sx / 2; p_temp1[11] = sz / 2;
p_temp2[0] = cos(theta); p_temp2[1] = 0; p_temp2[2] = -sin(theta); p_temp2[3] = 0;
p_temp2[4] = 0; p_temp2[5] = 1; p_temp2[6] = 0; p_temp2[7] = 0;
p_temp2[8] = sin(theta); p_temp2[9] = 0; p_temp2[10] = cos(theta); p_temp2[11] = 0;
sNew = round(sqrt(sx * sx + sz*sz));
p_temp3[0] = p_temp3[5] = p_temp3[10] = 1;
p_temp3[3] = -sNew / 2; p_temp3[11] = -sNew / 2;
}
if (rotAxis == 3){//Rotate by z axis
// Rotation matrix:translation (-sx2/2,-sy2/2, 0) --> rotation--> translation back(sx/2,sy/2,0)
// 1 0 0 sx / 2 cos(theta) sin(theta) 0 0 1 0 0 -sx2/2
// 0 1 0 sy / 2 * -sin(theta) cos(theta) 0 0 * 0 1 0 -sy2/2
// 0 0 1 0 0 0 1 0 0 0 1 0
// 0 0 0 1 0 0 0 1 0 0 0 1
p_temp1[0] = p_temp1[5] = p_temp1[10] = 1;
p_temp1[3] = sx / 2; p_temp1[7] = sy / 2;
p_temp2[0] = cos(theta); p_temp2[1] = sin(theta); p_temp2[2] = 0; p_temp2[3] = 0;
p_temp2[4] = -sin(theta); p_temp2[5] = cos(theta); p_temp2[6] = 0; p_temp2[7] = 0;
p_temp2[8] = 0; p_temp2[9] = 0; p_temp2[10] = 1; p_temp2[11] = 0;
sNew = round(sqrt(sx * sx + sy*sy));
p_temp3[0] = p_temp3[5] = p_temp3[10] = 1;
p_temp3[3] = -sNew / 2; p_temp3[7] = -sNew / 2;
}
matrixmultiply(p_temp, p_temp1, p_temp2);
matrixmultiply(p_out, p_temp, p_temp3);
free(p_temp);
free(p_temp1);
free(p_temp2);
free(p_temp3);
}
extern "C" void dof9tomatrix(float * p_out, float *p_dof, int dofNum){
//p_out: 12 elements
//p_dof: 10 elements: 0 x y z alpha beta theta a b c
//dofNum: 3, 6, 7 or 9
float *p_temp1, *p_temp2, *p_temp3;
p_temp1 = (float *)malloc(16 * sizeof(float));
p_temp2 = (float *)malloc(16 * sizeof(float));
p_temp3 = (float *)malloc(16 * sizeof(float));
for (int i = 0; i < 15; i++){
p_temp1[i] = p_temp2[i] = p_temp3[i] = 0;
}
p_temp1[15] = p_temp2[15] = p_temp3[15] = 1; //**** 12 13 14 15 never change ****
float x, y, z, alpha, beta, theta, a, b, c;
if (dofNum == 3){//translation
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = 0;
beta = 0;
theta = 0;
a = 1;
b = 1;
c = 1;
}
else if (dofNum == 6){//rigid body: translation, rotation
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = p_dof[4] / 57.3;
beta = p_dof[5] / 57.3;
theta = p_dof[6] / 57.3;
a = 1;
b = 1;
c = 1;
}
else if (dofNum == 7){ // translation, rotation, scaling equally in 3 dimensions
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = p_dof[4] / 57.3;
beta = p_dof[5] / 57.3;
theta = p_dof[6] / 57.3;
a = p_dof[7];
b = p_dof[7];
c = p_dof[7];
}
else if (dofNum == 9){//translation,rotation,scale
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = p_dof[4] / 57.3;
beta = p_dof[5] / 57.3;
theta = p_dof[6] / 57.3;
a = p_dof[7];
b = p_dof[8];
c = p_dof[9];
}
//translation
// 1 0 0 x
// 0 1 0 y
// 0 0 1 z
// 0 0 0 1
p_temp2[3] = x;
p_temp2[7] = y;
p_temp2[11] = z;
// scaling
// a 0 0 0
// 0 b 0 0
// 0 0 c 0
// 0 0 0 1
p_temp2[0] = a;
p_temp2[5] = b;
p_temp2[10] = c;
// rotating by Z axis
// cos(alpha) sin(alpha) 0 0
// -sin(alpha) cos(alpha) 0 0
// 0 0 1 0
// 0 0 0 1
p_temp3[0] = cos(alpha); p_temp3[1] = sin(alpha); p_temp3[2] = 0; p_temp3[3] = 0;
p_temp3[4] = -sin(alpha); p_temp3[5] = cos(alpha); p_temp3[6] = 0; p_temp3[7] = 0;
p_temp3[8] = 0; p_temp3[9] = 0; p_temp3[10] = 1; p_temp3[11] = 0;
//p_temp3[15] = 1;
matrixmultiply(p_temp1, p_temp2, p_temp3);
// rotating by X axis
// 1 0 0 0
// 0 cos(beta) sin(beta) 0
// 0 -sin(beta) cos(beta) 0
// 0 0 0 1
p_temp3[0] = 1; p_temp3[1] = 0; p_temp3[2] = 0; p_temp3[3] = 0;
p_temp3[4] = 0; p_temp3[5] = cos(beta); p_temp3[6] = sin(beta); p_temp3[7] = 0;
p_temp3[8] = 0; p_temp3[9] = -sin(beta); p_temp3[10] = cos(beta); p_temp3[11] = 0;
//p_temp3[15] = 1;
matrixmultiply(p_temp2, p_temp1, p_temp3);
// rotating by Y axis
// cos(theta) 0 -sin(theta) 0
// 0 1 0 0
// sin(theta) 0 cos(theta) 0
// 0 0 0 1
p_temp3[0] = cos(theta); p_temp3[1] = 0; p_temp3[2] = -sin(theta); p_temp3[3] = 0;
p_temp3[4] = 0; p_temp3[5] = 1; p_temp3[6] = 0; p_temp3[7] = 0;
p_temp3[8] = sin(theta); p_temp3[9] = 0; p_temp3[10] = cos(theta); p_temp3[11] = 0;
//p_temp3[15] = 1;
matrixmultiply(p_out, p_temp2, p_temp3);
free(p_temp1);
free(p_temp2);
free(p_temp3);
}
template <class T>
void circshiftgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz){
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( circshiftgpukernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata, sx, sy, sz, dx, dy, dz);
hipDeviceSynchronize();
}
template void circshiftgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void circshiftgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void circshiftgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template <class T>
void imshiftgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz) {
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
hipLaunchKernelGGL(( imshiftgpukernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata, sx, sy, sz, dx, dy, dz);
hipDeviceSynchronize();
}
template void imshiftgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void imshiftgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void imshiftgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
extern "C" void CopyTranMatrix(float *x, int dataSize){
hipMemcpyToSymbol(d_aff, x, dataSize, 0, hipMemcpyHostToDevice);
}
template <class T>
void cudacopyhosttoarray(hipArray *d_Array, hipChannelFormatDesc channelDesc, T *h_idata, size_t sx, size_t sy, size_t sz){
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)h_idata, sx*sizeof(T), sx, sy);
copyParams.dstArray = d_Array;
copyParams.extent = make_hipExtent(sx, sy, sz);
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
hipDeviceSynchronize();
}
template void
cudacopyhosttoarray<unsigned short>(hipArray *d_Array, hipChannelFormatDesc channelDesc, unsigned short *h_idata, size_t sx, size_t sy, size_t sz);
template void
cudacopyhosttoarray<float>(hipArray *d_Array, hipChannelFormatDesc channelDesc, float *h_idata, size_t sx, size_t sy, size_t sz);
template <class T>
void cudacopydevicetoarray(hipArray *d_Array, hipChannelFormatDesc channelDesc, T *d_idata, size_t sx, size_t sy, size_t sz){
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)d_idata, sx*sizeof(T), sx, sy);
copyParams.dstArray = d_Array;
copyParams.extent = make_hipExtent(sx, sy, sz);
copyParams.kind = hipMemcpyDeviceToDevice;
hipMemcpy3D(©Params);
hipDeviceSynchronize();
}
template void
cudacopydevicetoarray<unsigned short>(hipArray *d_Array, hipChannelFormatDesc channelDesc, unsigned short *d_idata, size_t sx, size_t sy, size_t sz);
template void
cudacopydevicetoarray<float>(hipArray *d_Array, hipChannelFormatDesc channelDesc, float *d_idata, size_t sx, size_t sy, size_t sz);
extern "C" void BindTexture(hipArray *d_Array, hipChannelFormatDesc channelDesc){
// set texture parameters
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.addressMode[2] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
tex.normalized = false; // use unnormalized (voxel) coordinates
// Bind the array to the texture
hipBindTextureToArray(tex, d_Array, channelDesc);
hipDeviceSynchronize();
}
extern "C" void BindTexture2(hipArray *d_Array, hipChannelFormatDesc channelDesc) {
// set texture parameters
tex2.addressMode[0] = hipAddressModeWrap;
tex2.addressMode[1] = hipAddressModeWrap;
tex2.addressMode[2] = hipAddressModeWrap;
tex2.filterMode = hipFilterModeLinear;
tex2.normalized = false; // use unnormalized (voxel) coordinates
// Bind the array to the texture
hipBindTextureToArray(tex2, d_Array, channelDesc);
hipDeviceSynchronize();
}
extern "C" void BindTexture16(hipArray *d_Array, hipChannelFormatDesc channelDesc){
// set texture parameters
tex16.addressMode[0] = hipAddressModeWrap;
tex16.addressMode[1] = hipAddressModeWrap;
tex16.addressMode[2] = hipAddressModeWrap;
tex16.filterMode = hipFilterModeLinear;
tex16.normalized = false; // use unnormalized (voxel) coordinates
// Bind the array to the texture
hipBindTextureToArray(tex16, d_Array, channelDesc);
hipDeviceSynchronize();
}
extern "C" void UnbindTexture(){
hipUnbindTexture(tex);
hipDeviceSynchronize();
}
extern "C" void UnbindTexture2() {
hipUnbindTexture(tex2);
hipDeviceSynchronize();
}
extern "C" void UnbindTexture16(){
hipUnbindTexture(tex16);
hipDeviceSynchronize();
}
extern "C" void AccessTexture(float x, float y,float z){
dim3 threads(2, 2, 2);
hipLaunchKernelGGL(( accesstexturekernel) , dim3(1), dim3(threads) , 0, 0, x, y, z);
hipDeviceSynchronize();
}
template <class T>
void affineTransform(T *d_s, long long int sx, long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grid(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
hipLaunchKernelGGL(( affinetransformkernel<T>), dim3(grid), dim3(threads) , 0, 0, d_s, sx, sy, sz, sx2, sy2, sz2);
hipDeviceSynchronize();
}
template void
affineTransform<unsigned short>(unsigned short *d_s, long long int sx, long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2);
template void
affineTransform<float>(float *d_s, long long int sx, long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2);
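// corrfunc: correlation between the target stack d_t and the texture-bound source warped by
// the affine matrix aff; corrkernel (in cukernel.cuh) accumulates per-(x, y) partial sums of
// t*t and s*t over z, which are reduced and combined as corrSum / (sqrt(sqrSum) * sd_t)
// (see corrfunccpu below for the CPU reference of the same quantity).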
float corrfunc(float *d_t, float sd_t, float *aff, long long int sx,
long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2){
// temp bufs
long long int sxy = sx * sy;
double *d_temp1 = NULL, *d_temp2 = NULL;
hipMalloc((void **)&d_temp1, sxy * sizeof(double));
hipMalloc((void **)&d_temp2, sxy * sizeof(double));
//copy aff to GPU const
hipMemcpyToSymbol(d_aff, aff, 12 * sizeof(float), 0, hipMemcpyHostToDevice);// copy host affine matrix to device const
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
hipLaunchKernelGGL(( corrkernel), dim3(grids), dim3(threads), 0, 0, d_t, // the source image is texture, trans matrix is const
d_temp1, d_temp2, sx, sy, sz, sx2, sy2, sz2);
hipDeviceSynchronize();
double sqrSum = 0, corrSum = 0;
if (sxy > 100000){ // if count more than 100000, use gpu to perform sum
sqrSum = sumgpu1D(d_temp1, sxy);
corrSum = sumgpu1D(d_temp2, sxy);
}
else{
double *h_temp = NULL;
h_temp = (double *)malloc(sx*sy * sizeof(double));
hipMemcpy(h_temp, d_temp1, sxy * sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < sxy; i++)
sqrSum += h_temp[i];
hipMemcpy(h_temp, d_temp2, sxy * sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < sxy; i++)
corrSum += h_temp[i];
free(h_temp);
}
hipFree(d_temp1);
hipFree(d_temp2);
if (sqrt(sqrSum) == 0) return -2.0;
return (float)(corrSum / sqrt(sqrSum)) / sd_t;
}
extern "C" void BindTexture2D(hipArray *d_Array, hipChannelFormatDesc channelDesc){
// set texture parameters
tex2D1.addressMode[0] = hipAddressModeWrap;
tex2D1.addressMode[1] = hipAddressModeWrap;
tex2D1.filterMode = hipFilterModeLinear;
tex2D1.normalized = false; // access with unnormalized texture coordinates
// Bind the array to the texture
hipBindTextureToArray(tex2D1, d_Array, channelDesc);
}
extern "C" void UnbindTexture2D(
){
hipUnbindTexture(tex2D1);
}
extern "C"
void affineTransform2D(float *d_t, int sx, int sy, int sx2, int sy2){
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
hipLaunchKernelGGL(( affineTransform2Dkernel) , dim3(grids), dim3(threads) , 0, 0, d_t, sx, sy, sx2, sy2);
hipDeviceSynchronize();
}
float corrfunc2D(float *d_t, float sd_t, float *aff, long long int sx, long long int sy, long long int sx2, long long int sy2){
//copy aff to GPU const
hipMemcpyToSymbol(d_aff, aff, 6 * sizeof(float), 0, hipMemcpyHostToDevice);// copy host affine matrix to device const
long long int totalSize = sx*sy;
float *d_sqr = NULL, *d_corr = NULL, *h_temp = NULL;
hipMalloc((void **)&d_sqr, totalSize * sizeof(float));
hipMalloc((void **)&d_corr, totalSize * sizeof(float));
h_temp = (float *)malloc(totalSize * sizeof(float));
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
hipLaunchKernelGGL(( corr2Dkernel) , dim3(grids), dim3(threads) , 0, 0, // the other image is texture, trans matrix is const
d_t, d_sqr, d_corr, sx, sy, sx2, sy2);
hipDeviceSynchronize();
hipMemcpy(h_temp, d_corr, totalSize * sizeof(float), hipMemcpyDeviceToHost);
double corrSum = sumcpu(h_temp, totalSize);
hipMemcpy(h_temp, d_sqr, totalSize * sizeof(float), hipMemcpyDeviceToHost);
double sqrSum = sumcpu(h_temp, totalSize);
hipFree(d_sqr);
hipFree(d_corr);
free(h_temp);
if (sqrt(sqrSum) == 0) return -2.0;
return float(corrSum / sqrt(sqrSum))/sd_t;
}
///// CPU interpolation
float lerp(float x, float x1, float x2, float q00, float q01) {
return ((x2 - x) / (x2 - x1)) * q01 + ((x - x1) / (x2 - x1)) * q00;
}
float bilerp(float x, float y, float x1, float x2, float y1, float y2, float q11, float q12, float q21, float q22) {
float r1 = lerp(x, x1, x2, q11, q12);
float r2 = lerp(x, x1, x2, q21, q22);
return lerp(y, y1, y2, r1, r2);
}
float trilerp(float x, float y, float z, float x1, float x2, float y1, float y2, float z1, float z2,
float q111, float q112, float q121, float q122, float q211, float q212, float q221, float q222) {
float r1 = bilerp(x, y, x1, x2, y1, y2, q111, q112, q121, q122);
float r2 = bilerp(x, y, x1, x2, y1, y2, q211, q212, q221, q222);
return lerp(z, z1, z2, r1, r2);
}
float ilerp(float x, float x1, float x2, float q00, float q01) {
return (x2 - x) * q00 + (x - x1) * q01;
}
float ibilerp(float x, float y, float x1, float x2, float y1, float y2, float q11, float q12, float q21, float q22) {
float r1 = ilerp(x, x1, x2, q11, q12);
float r2 = ilerp(x, x1, x2, q21, q22);
return ilerp(y, y1, y2, r1, r2);
}
float itrilerp(float x, float y, float z, float x1, float x2, float y1, float y2, float z1, float z2,
float q111, float q112, float q121, float q122, float q211, float q212, float q221, float q222) {
float r1 = ibilerp(x, y, x1, x2, y1, y2, q111, q112, q121, q122);
float r2 = ibilerp(x, y, x1, x2, y1, y2, q211, q212, q221, q222);
return ilerp(z, z1, z2, r1, r2);
}
float ilerp2(float dx1, float dx2, float q00, float q01) {
return dx2 * q00 + dx1 * q01;
}
float ibilerp2(float dx1, float dx2, float dy1, float dy2, float q11, float q12, float q21, float q22) {
float r1 = ilerp2(dx1, dx2, q11, q12);
float r2 = ilerp2(dx1, dx2, q21, q22);
return ilerp2(dy1, dy2, r1, r2);
}
float itrilerp2(float dx1, float dx2, float dy1, float dy2, float dz1, float dz2,
float q111, float q112, float q121, float q122, float q211, float q212, float q221, float q222) {
float r1 = ibilerp2(dx1, dx2, dy1, dy2, q111, q112, q121, q122);
float r2 = ibilerp2(dx1, dx2, dy1, dy2, q211, q212, q221, q222);
return ilerp2(dz1, dz2, r1, r2);
}
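// Worked example for the incremental weights used above: for a sample at tx = 1.25,
// ty = 2.5, tz = 0.0 the fractions are dx1 = 0.25, dx2 = 0.75, dy1 = dy2 = 0.5,
// dz1 = 0.0, dz2 = 1.0, so itrilerp2 reduces to a bilinear blend of the four z1-plane
// corners:
//   t = 1.0 * (0.5*0.75*q111 + 0.5*0.25*q112 + 0.5*0.75*q121 + 0.5*0.25*q122);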
//output[sz-k-1][j][i] = input[i][j][k]
//d_odata[(sz - k - 1)*sx*sy + j*sx + i] = d_idata[i*sy*sz + j*sz + k];
double corrfunccpu(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
double sqrSum = 0, corrSum = 0;
int x1, y1, z1, x2, y2, z2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int sxy = sx*sy, sxy2 = sx2*sy2;
for (int i = 0; i < sx; i++){
for (int j = 0; j < sy; j++){
for (int k = 0; k < sz; k++){
float ix = (float)i;
float iy = (float)j;
float iz = (float)k;
float tx = aff[0] * ix + aff[1] * iy + aff[2] * iz + aff[3];
float ty = aff[4] * ix + aff[5] * iy + aff[6] * iz + aff[7];
float tz = aff[8] * ix + aff[9] * iy + aff[10] * iz + aff[11];
x1 = floor(tx); y1 = floor(ty); z1 = floor(tz);
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
if ((x1 >= 0) && (y1 >= 0) && (z1 >= 0) && (x2 < sx2) && (y2 < sy2) && (z2 < sz2)){
// [k*sy*sx + j*sx + i]
q1 = h_t[z1*sxy2 + y1*sx2 + x1];
q2 = h_t[z1*sxy2 + y1*sx2 + x2];
q3 = h_t[z1*sxy2 + y2*sx2 + x1];
q4 = h_t[z1*sxy2 + y2*sx2 + x2];
q5 = h_t[z2*sxy2 + y1*sx2 + x1];
q6 = h_t[z2*sxy2 + y1*sx2 + x2];
q7 = h_t[z2*sxy2 + y2*sx2 + x1];
q8 = h_t[z2*sxy2 + y2*sx2 + x2];
t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
}
else
t = 0;
s = h_s[k*sxy + j*sx + i];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
double corrfunccpu3(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
const float r0 = aff[0], r1 = aff[1], r2 = aff[2], r3 = aff[3], r4 = aff[4], r5= aff[5],
r6 = aff[6], r7 = aff[7], r8 = aff[8], r9 = aff[9], r10 = aff[10], r11 = aff[11];
double sqrSum = 0, corrSum = 0;
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int syz = sy*sz, syz2 = sy2*sz2, x1syz2, x2syz2, y1sz2, y2sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = r0 * ix + r1 * iy + r2 * iz + r3;
ty = r4 * ix + r5 * iy + r6 * iz + r7;
tz = r8 * ix + r9 * iy + r10 * iz + r11;
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
x1syz2 = x1*syz2;
x2syz2 = x2*syz2;
y1sz2 = y1*sz2;
y2sz2 = y2*sz2;
q1 = h_t[x1syz2 + y1sz2 + z1];
q2 = h_t[x2syz2 + y1sz2 + z1];
q3 = h_t[x1syz2 + y2sz2 + z1];
q4 = h_t[x2syz2 + y2sz2 + z1];
q5 = h_t[x1syz2 + y1sz2 + z2];
q6 = h_t[x2syz2 + y1sz2 + z2];
q7 = h_t[x1syz2 + y2sz2 + z2];
q8 = h_t[x2syz2 + y2sz2 + z2];
//t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
t = dz2*(dy2*dx2*q1 + dy2*dx1*q2 + dy1*dx2*q3 + dy1*dx1*q4) + dz1*(dy2*dx2*q5 + dy2*dx1*q6 + dy1*dx2*q7 + dy1*dx1*q8);
//t = 1;
}
else
t = 0;
s = h_s[i*syz + j*sz + k];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
double corrfunccpu2_old(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
const float r0 = aff[0], r1 = aff[1], r2 = aff[2], r3 = aff[3], r4 = aff[4], r5 = aff[5],
r6 = aff[6], r7 = aff[7], r8 = aff[8], r9 = aff[9], r10 = aff[10], r11 = aff[11];
double sqrSum = 0, corrSum = 0;
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int sxy = sx*sy, sxy2 = sx2*sy2, z1sxy2, z2sxy2, y1sx2, y2sx2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = r0 * ix + r1 * iy + r2 * iz + r3;
ty = r4 * ix + r5 * iy + r6 * iz + r7;
tz = r8 * ix + r9 * iy + r10 * iz + r11;
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
z1sxy2 = z1*sxy2;
z2sxy2 = z2*sxy2;
y1sx2 = y1*sx2;
y2sx2 = y2*sx2;
q1 = h_t[z1sxy2 + y1sx2 + x1];
q2 = h_t[z1sxy2 + y1sx2 + x2];
q3 = h_t[z1sxy2 + y2sx2 + x1];
q4 = h_t[z1sxy2 + y2sx2 + x2];
q5 = h_t[z2sxy2 + y1sx2 + x1];
q6 = h_t[z2sxy2 + y1sx2 + x2];
q7 = h_t[z2sxy2 + y2sx2 + x1];
q8 = h_t[z2sxy2 + y2sx2 + x2];
//t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
t = dz2*(dy2*dx2*q1 + dy2*dx1*q2 + dy1*dx2*q3 + dy1*dx1*q4) + dz1*(dy2*dx2*q5 + dy2*dx1*q6 + dy1*dx2*q7 + dy1*dx1*q8);
//t = 1;
}
else
t = 0;
s = h_s[k*sxy + j*sx + i];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
void affinetransformcpu_old(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float t;
int sxy = sx*sy, sxy2 = sx2*sy2, z1sxy2, z2sxy2, y1sx2, y2sx2;
int syz = sy*sz, syz2 = sy2*sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = aff[0] * ix + aff[1] * iy + aff[2] * iz + aff[3];
ty = aff[4] * ix + aff[5] * iy + aff[6] * iz + aff[7];
tz = aff[8] * ix + aff[9] * iy + aff[10] * iz + aff[11];
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
z1sxy2 = z1*sxy2;
z2sxy2 = z2*sxy2;
y1sx2 = y1*sx2;
y2sx2 = y2*sx2;
q1 = h_t[z1sxy2 + y1sx2 + x1];
q2 = h_t[z1sxy2 + y1sx2 + x2];
q3 = h_t[z1sxy2 + y2sx2 + x1];
q4 = h_t[z1sxy2 + y2sx2 + x2];
q5 = h_t[z2sxy2 + y1sx2 + x1];
q6 = h_t[z2sxy2 + y1sx2 + x2];
q7 = h_t[z2sxy2 + y2sx2 + x1];
q8 = h_t[z2sxy2 + y2sx2 + x2];
t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = dz2*(dy2*dx2*q1 + dy2*dx1*q2 + dy1*dx2*q3 + dy1*dx1*q4) + dz1*(dy2*dx2*q5 + dy2*dx1*q6 + dy1*dx2*q7 + dy1*dx1*q8);
}
else
t = 0;
				h_s[k*sxy + j*sx + i] = t;
}
}
}
}
double corrfunccpu2(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
const float r0 = aff[0], r1 = aff[1], r2 = aff[2], r3 = aff[3], r4 = aff[4], r5 = aff[5],
r6 = aff[6], r7 = aff[7], r8 = aff[8], r9 = aff[9], r10 = aff[10], r11 = aff[11];
double sqrSum = 0, corrSum = 0;
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int syz = sy*sz, syz2 = sy2*sz2, x1syz2, x2syz2, y1sz2, y2sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = r0 * ix + r1 * iy + r2 * iz + r3;
ty = r4 * ix + r5 * iy + r6 * iz + r7;
tz = r8 * ix + r9 * iy + r10 * iz + r11;
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
x1syz2 = x1*syz2;
x2syz2 = x2*syz2;
y1sz2 = y1*sz2;
y2sz2 = y2*sz2;
q1 = h_t[x1syz2 + y1sz2 + z1];
q2 = h_t[x2syz2 + y1sz2 + z1];
q3 = h_t[x1syz2 + y2sz2 + z1];
q4 = h_t[x2syz2 + y2sz2 + z1];
q5 = h_t[x1syz2 + y1sz2 + z2];
q6 = h_t[x2syz2 + y1sz2 + z2];
q7 = h_t[x1syz2 + y2sz2 + z2];
q8 = h_t[x2syz2 + y2sz2 + z2];
t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
}
else
t = 0;
s = h_s[i*syz + j*sz + k];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
void affinetransformcpu(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float t;
int syz = sy*sz, syz2 = sy2*sz2, x1syz2, x2syz2, y1sz2, y2sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = aff[0] * ix + aff[1] * iy + aff[2] * iz + aff[3];
ty = aff[4] * ix + aff[5] * iy + aff[6] * iz + aff[7];
tz = aff[8] * ix + aff[9] * iy + aff[10] * iz + aff[11];
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
x1syz2 = x1*syz2;
x2syz2 = x2*syz2;
y1sz2 = y1*sz2;
y2sz2 = y2*sz2;
q1 = h_t[x1syz2 + y1sz2 + z1];
q2 = h_t[x2syz2 + y1sz2 + z1];
q3 = h_t[x1syz2 + y2sz2 + z1];
q4 = h_t[x2syz2 + y2sz2 + z1];
q5 = h_t[x1syz2 + y1sz2 + z2];
q6 = h_t[x2syz2 + y1sz2 + z2];
q7 = h_t[x1syz2 + y2sz2 + z2];
q8 = h_t[x2syz2 + y2sz2 + z2];
t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
}
else
t = 0;
h_s[i*syz + j*sz + k] = t;
}
}
}
}
// CPU
template <class T>
void flipcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz) {
for (long long int i = 0; i < sx; i++) {
for (long long int j = 0; j < sy; j++) {
for (long long int k = 0; k < sz; k++) {
//d_odata[k*sy*sx + j*sx + i] = d_idata[(sz - k - 1) *sy*sx + (sy - j - 1)*sx + (sx - i - 1)];
h_odata[i*sy*sz + j*sz + k] = h_idata[(sx - i - 1) *sy*sz + (sy - j - 1)*sz + (sz - k - 1)];
}
}
}
}
template void flipcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz);
template void flipcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz);
template void flipcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz);
template <class T>
void padPSFcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2){
long long int sox, soy, soz;
sox = sx2 / 2; soy = sy2 / 2; soz = sz2 / 2;
long long int dx, dy, dz;
for (long long int x = 0; x < sx; x++) {
for (long long int y = 0; y < sy; y++) {
for (long long int z = 0; z < sz; z++) {
dx = x - sox; dy = y - soy; dz = z - soz;
if (dx < 0) dx += sx;
if (dy < 0) dy += sy;
if (dz < 0) dz += sz;
//d_PaddedPSF[dz][dy][dx] = d_PSF[z][y][x]
if (dx >= 0 && dx < sx && dy >= 0 && dy < sy && dz >= 0 && dz < sz) {
//d_odata[dz*sy*sx + dy*sx + dx] = d_idata[z*sy2*sx2 + y*sx2 + x];
h_odata[dx*sy*sz + dy*sz + dz] = h_idata[x*sy2*sz2 + y*sz2 + z];
}
}
}
}
}
template void
padPSFcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void padstackcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2){
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
long long int x, y, z;
for (long long int dx = 0; dx < sx; dx++) {
for (long long int dy = 0; dy < sy; dy++) {
for (long long int dz = 0; dz < sz; dz++) {
if (dx < sox) {
x = 0;
}
if (dy < soy) {
y = 0;
}
if (dz < soz) {
z = 0;
}
if (dx >= sox && dx < (sox + sx2)) {
x = dx - sox;
}
if (dy >= soy && dy < (soy + sy2)) {
y = dy - soy;
}
if (dz >= soz && dz < (soz + sz2)) {
z = dz - soz;
}
if (dx >= (sox + sx2)) {
x = sx2 - 1;
}
if (dy >= (soy + sy2)) {
y = sy2 - 1;
}
if (dz >= (soz + sz2)) {
z = sz2 - 1;
}
//d_odata[dz*sy*sx + dy*sx + dx] = d_idata[z*sy2*sx2 + y*sx2 + x];
h_odata[dx*sy*sz + dy*sz + dz] = h_idata[x*sy2*sz2 + y*sz2 + z];
}
}
}
}
template void
padstackcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2){
long long int sox, soy, soz;
sox = (sx2 - sx) / 2;
soy = (sy2 - sy) / 2;
soz = (sz2 - sz) / 2;
long long int dx, dy, dz;
for (long long int x = 0; x < sx; x++) {
for (long long int y = 0; y < sy; y++) {
for (long long int z = 0; z < sz; z++) {
dx = sox + x; dy = soy + y; dz = soz + z;
//d_odata[z*sy*sx + y*sx + x] = d_idata[dz*sy2*sx2 + dy*sx2 + dx];
h_odata[x*sy*sz + y*sz + z] = h_idata[dx*sy2*sz2 + dy*sz2 + dz];
}
}
}
}
template void
cropcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropcpu2(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz) {
long long int dx, dy, dz;
for (long long int x = 0; x < sx; x++) {
for (long long int y = 0; y < sy; y++) {
for (long long int z = 0; z < sz; z++) {
dx = sox + x; dy = soy + y; dz = soz + z;
h_odata[z*sy*sx + y*sx + x] = h_idata[dz*sy2*sx2 + dy*sx2 + dx];
//h_odata[x*sy*sz + y*sz + z] = h_idata[dx*sy2*sz2 + dy*sz2 + dz];
}
}
}
}
template void
cropcpu2<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropcpu2<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropcpu2<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template <class T>
void alignsize3Dcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
long long int x, y, z;
for (long long int dx = 0; dx < sx; dx++) {
for (long long int dy = 0; dy < sy; dy++) {
for (long long int dz = 0; dz < sz; dz++) {
x = dx - sox;
y = dy - soy;
z = dz - soz;
if ((x < 0) || (y < 0) || (z < 0) || (x >= sx2) || (y >= sy2) || (z >= sz2))
h_odata[dx*sy*sz + dy*sz + dz] = 0;
else
h_odata[dx*sy*sz + dy*sz + dz] = h_idata[x*sy2*sz2 + y*sz2 + z];
}
}
}
}
template void alignsize3Dcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
extern "C"
void genOTFcpu(fftwf_complex *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, bool normFlag) {
long long int totalSizeIn = sx2 * sy2 * sz2;
long long int totalSizeOut = sx * sy * sz;
long long int totalSizeMax = (totalSizeIn > totalSizeOut) ? totalSizeIn : totalSizeOut;
float *h_temp = (float *)malloc(totalSizeMax * sizeof(float));
if (normFlag) {
double sumValue = sumcpu(h_idata, sx2 * sy2 * sz2);
multivaluecpu(h_temp, h_idata, (float)(1 / sumValue), sx2 * sy2 * sz2);
}
else
memcpy(h_temp, h_idata, totalSizeIn * sizeof(float));
if((sx<sx2)||(sy<sy2)||(sz<sz2)){
alignsize3Dcpu((float *)h_odata, h_temp, sx, sy, sz, sx2, sy2, sz2);
padPSFcpu(h_temp, (float *)h_odata, sx, sy, sz, sx, sy, sz);
}
else {
padPSFcpu((float *)h_odata, h_temp, sx, sy, sz, sx2, sy2, sz2);
memcpy(h_temp, h_odata, totalSizeOut * sizeof(float));
}
	// use FFTW_ESTIMATE so the already-filled arrays are not overwritten during planning
	// (FFTW_MEASURE may clobber its input/output buffers while measuring)
	fftwf_plan image2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_temp, h_odata, FFTW_ESTIMATE);
fftwf_execute(image2Spectrum);
free(h_temp);
fftwf_destroy_plan(image2Spectrum);
}
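// Usage sketch for genOTFcpu (h_OTF, h_psf and the psf* sizes are hypothetical caller-side
// names; sx x sy x sz is the FFT size, already including any padding):
//   fftwf_complex *h_OTF =
//       (fftwf_complex *)fftwf_malloc(sx * sy * (sz / 2 + 1) * sizeof(fftwf_complex));
//   genOTFcpu(h_OTF, h_psf, sx, sy, sz, psfx, psfy, psfz, true); // true: normalize PSF to unit sum
//   // ... use h_OTF for frequency-domain filtering ...
//   fftwf_free(h_OTF);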
// GPU
template <class T>
void flipgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz) {
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
flipgpukernel<T> << <grids, threads >> >(d_odata, d_idata, sx, sy, sz);
hipDeviceSynchronize();
}
template void flipgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz);
template void flipgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz);
template void flipgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz);
template <class T>
void padPSFgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = sx2 / 2; soy = sy2 / 2; soz = sz2 / 2;
hipMemset(d_odata, 0, sx*sy*sz * sizeof(T));
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx2, threads.x), iDivUp(sy2, threads.y), iDivUp(sz2, threads.z));
padPSFgpukernel<T> << <grids, threads >> >(d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
hipDeviceSynchronize();
}
template void
padPSFgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void padstackgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
padstackgpukernel<T> << < grids, threads >> > (d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
hipDeviceSynchronize();
}
template void
padstackgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = (sx2 - sx) / 2;
soy = (sy2 - sy) / 2;
soz = (sz2 - sz) / 2;
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
hipLaunchKernelGGL(( cropgpukernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
hipDeviceSynchronize();
}
template void
cropgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropgpu2(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz) {
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sz, threads.x), iDivUp(sy, threads.y), iDivUp(sx, threads.z));
hipLaunchKernelGGL(( cropgpukernel<T>) , dim3(grids), dim3(threads) , 0, 0, d_odata, d_idata, sz, sy, sx, sz2, sy2, sx2, soz, soy, sox);
hipDeviceSynchronize();
}
template void
cropgpu2<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropgpu2<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropgpu2<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template <class T>
void alignsize3Dgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
alignsize3Dgpukernel<T> << < grids, threads >> > (d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
hipDeviceSynchronize();
}
template void alignsize3Dgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
// Registration variables: 2D
static float *d_img2D = NULL;
static float *h_aff2D;
static long long int imx2D1, imy2D1, imx2D2, imy2D2;
static float valueStatic2D;
static int itNumStatic2D;
// Registration variables: 3D
static float *d_imgStatic = NULL;
static float valueStatic;
static long long int sxStatic1, syStatic1, szStatic1, sxStatic2, syStatic2, szStatic2;
static float *affCoef;
static int itNumStatic, dofNum;
static bool dof9Flag;
static float *h_s3D = NULL, *h_t3D = NULL;
float costfunc2D(float *x) {
h_aff2D[0] = x[1], h_aff2D[1] = x[2], h_aff2D[2] = x[3];
h_aff2D[3] = x[4], h_aff2D[4] = x[5], h_aff2D[5] = x[6];
float costValue = corrfunc2D(d_img2D, valueStatic2D, h_aff2D, imx2D1, imy2D1, imx2D2, imy2D2);
itNumStatic2D += 1;
return -costValue;
}
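// Note: the parameter vector handed to costfunc2D is 1-based (x[1]..x[6] carry the six
// affine coefficients; x[0] is unused), matching how p2D and xi2D are filled in
// reg2d_affine1 below. Judging from the default matrix set there, the layout appears to be
//   x[1] x[2] x[3]      a00 a01 tx
//   x[4] x[5] x[6]  ->  a10 a11 ty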
extern "C"
int affinetrans2d0(float *h_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sx2, long long int sy2) {
return 0;
}
extern "C"
// bug in affinetrans2d1
int affinetrans2d1(float *h_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sx2, long long int sy2) {
// total pixel count for each images
long long int totalSize1 = sx*sy;
long long int totalSize2 = sx2*sx2;
float *d_imgTemp = NULL;
hipMalloc((void **)&d_imgTemp, totalSize1 * sizeof(float));
cudaCheckErrors("****Memory allocating fails... GPU out of memory !!!!*****\n");
hipChannelFormatDesc channelDesc2D =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *d_Array2D;
hipMallocArray(&d_Array2D, &channelDesc2D, sx2, sy2);
cudaCheckErrors("****Memory array allocating fails... GPU out of memory !!!!*****\n");
CopyTranMatrix(iTmx, 6 * sizeof(float));
hipMemcpyToArray(d_Array2D, 0, 0, h_idata, totalSize2 * sizeof(float), hipMemcpyHostToDevice);
BindTexture2D(d_Array2D, channelDesc2D);
affineTransform2D(d_imgTemp, sx, sy, sx2, sy2);
	UnbindTexture2D();
hipMemcpy(h_odata, d_imgTemp, totalSize1 * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_imgTemp);
hipFreeArray(d_Array2D);
return 0;
}
extern "C"
int reg2d_phasor0(long long int *shiftXY, float *h_img1, float *h_img2, long long int sx, long long int sy) {
return 0;
}
extern "C"
int reg2d_phasor1(long long int *shiftXY, float *d_img1, float *d_img2, long long int sx, long long int sy) {
	long long int totalSize = sx * sy;
	long long int totalSizeSpectrum = sy * (sx / 2 + 1); // in complex floating format
fComplex *d_Spectrum1 = NULL, *d_Spectrum2 = NULL;
hipMalloc((void **)&d_Spectrum1, totalSizeSpectrum * sizeof(fComplex));
hipMalloc((void **)&d_Spectrum2, totalSizeSpectrum * sizeof(fComplex));
hipfftHandle
fftPlanFwd,
fftPlanInv;
hipfftPlan2d(&fftPlanFwd, sy, sx, HIPFFT_R2C);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_img1, (hipfftComplex *)d_Spectrum2);
conj3Dgpu(d_Spectrum1, d_Spectrum2, sy, (sx / 2 + 1), 1);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_img2, (hipfftComplex *)d_Spectrum2);
// multiplication and normalization
multicomplexnorm3Dgpu(d_Spectrum2, d_Spectrum1, d_Spectrum2, sy, (sx / 2 + 1), 1);
hipfftDestroy(fftPlanFwd);
hipfftPlan2d(&fftPlanInv, sy, sx, HIPFFT_C2R);
float *d_phasor1 = (float *)d_Spectrum1;
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_Spectrum2, (hipfftReal *)d_phasor1);
hipfftDestroy(fftPlanInv);
size_t corXYZ[3];
float *d_phasor2 = (float *)d_Spectrum2;
circshiftgpu(d_phasor2, d_phasor1, sx, sy, 1, round(sx / 2), round(sy / 2), 0);
float peakValue = max3Dgpu(&corXYZ[0], d_phasor2, sx, sy, 1);
shiftXY[0] = corXYZ[0] - sx / 2;
shiftXY[1] = corXYZ[1] - sy / 2;
hipFree(d_Spectrum1);
hipFree(d_Spectrum2);
// compare 4 cases based on cross-correlation
long long int shiftX = shiftXY[0];
long long int shiftY = shiftXY[1];
long long int xabs = abs(shiftX), yabs = abs(shiftY);
	long long int beta = 4; // threshold: run the cross-correlation check only if the shift exceeds 1/beta of the image size
	if ((xabs > (sx / beta)) || (yabs > (sy / beta))) {
float *d_imgT = NULL, *d_crop1 = NULL, *d_crop2 = NULL;
long long int sizex1, sizex2, sizey1, sizey2, sizez1, sizez2, sizex, sizey, sizez, sizeMaxCrop;
sizeMaxCrop = totalSize;
hipMalloc((void **)&d_imgT, totalSize * sizeof(float));
hipMalloc((void **)&d_crop1, sizeMaxCrop * sizeof(float));
hipMalloc((void **)&d_crop2, sizeMaxCrop * sizeof(float));
circshiftgpu(d_imgT, d_img2, sx, sy, 1, -shiftX, -shiftY, 0);
		// encode the 4 cases as a for loop
long long int imSizeCropx[2], imSizeCropy[2], imSizeCropz[2];
long long int imox[2], imoy[2], imoz[2];
// index 0 records original shifts, index 1 switches the shift to the opposite case.
imSizeCropx[0] = sx - xabs; imSizeCropx[1] = xabs;
if (shiftX > 0) {
imox[0] = 0; imox[1] = sx - xabs;
}
else {
imox[0] = xabs; imox[1] = 0;
}
imSizeCropy[0] = sy - yabs; imSizeCropy[1] = yabs;
if (shiftY > 0) {
imoy[0] = 0; imoy[1] = sy - yabs;
}
else {
imoy[0] = yabs; imoy[1] = 0;
}
int indx = 0, indy = 0;
float ccMax = -3, ccNow = 0;
for (int i = 0; i < 2; i++) {
if (imSizeCropx[i] >(sx / beta)) {
for (int j = 0; j < 2; j++) {
if (imSizeCropy[j] >(sy / beta)) {
cropgpu2(d_crop1, d_img1, imSizeCropx[i], imSizeCropy[j], 1, sx, sy, 1, imox[i], imoy[j], 0);
cropgpu2(d_crop2, d_imgT, imSizeCropx[i], imSizeCropy[j], 1, sx, sy, 1, imox[i], imoy[j], 0);
ccNow = zncc1(d_crop1, d_crop2, imSizeCropx[i], imSizeCropy[j], 1);
if (ccMax < ccNow) {
ccMax = ccNow;
indx = i;
indy = j;
}
}
}
}
}
// if ind ==1, flip the coordinates
if (indx == 1) {
if (shiftX > 0)
shiftXY[0] = shiftX - sx;
else
shiftXY[0] = shiftX + sx;
}
if (indy == 1) {
if (shiftY > 0)
shiftXY[1] = shiftY - sy;
else
shiftXY[1] = shiftY + sy;
}
hipFree(d_imgT);
hipFree(d_crop1);
hipFree(d_crop2);
}
return 0;
}
extern "C"
int reg2d_affine0(float *h_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy,
long long int sx2, long long int sy2, int affMethod, bool flagTmx, float FTOL, int itLimit, float *regRecords) {
// **** CPU affine registration for 2D images ***
return 0;
}
extern "C"
int reg2d_affine1(float *h_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy,
long long int sx2, long long int sy2, int affMethod, bool flagTmx, float FTOL, int itLimit, float *records) {
// **** GPU affine registration for 2D images ***
/*
*** flagTmx:
	true: use iTmx as input matrix;
false: default;
*** records: 8 element array
	[1]-[3]: initial ZNCC (zero-normalized cross-correlation, negative of the cost function), intermediate ZNCC, optimized ZNCC;
	[4]-[7]: single sub iteration time (in ms), total number of sub iterations, iteration time (in s), whole registration time (in s);
*/
imx2D1 = sx; imy2D1 = sy;
imx2D2 = sx2; imy2D2 = sy2;
	// total pixel count for each image
long long int totalSize1 = imx2D1*imy2D1;
long long int totalSize2 = imx2D2*imy2D2;
long long int totalSizeMax = (totalSize1 > totalSize2) ? totalSize1 : totalSize2;
// ****************** Processing Starts***************** //
// variables for memory and time cost records
clock_t start, end, ctime1, ctime2, ctime3;
start = clock();
int iter;
float fret;
int DIM2D = 6;
h_aff2D = (float *)malloc(DIM2D * sizeof(float));
static float *p2D = (float *)malloc((DIM2D + 1) * sizeof(float));
float **xi2D;
xi2D = matrix(1, DIM2D, 1, DIM2D);
float *h_imgT = (float *)malloc(totalSizeMax * sizeof(float));
hipMalloc((void **)&d_img2D, totalSize1 * sizeof(float));
cudaCheckErrors("****Memory allocating fails... GPU out of memory !!!!*****\n");
hipChannelFormatDesc channelDesc2D =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *d_Array2D;
hipMallocArray(&d_Array2D, &channelDesc2D, imx2D2, imy2D2);
cudaCheckErrors("****Memory array allocating fails... GPU out of memory !!!!*****\n");
if (flagTmx) {
memcpy(h_aff2D, iTmx, DIM2D * sizeof(float));
}
else {
h_aff2D[0] = 1, h_aff2D[1] = 0, h_aff2D[2] = (imx2D2 - imx2D1) / 2;
h_aff2D[3] = 0, h_aff2D[4] = 1, h_aff2D[5] = (imy2D2 - imy2D1) / 2;
}
p2D[0] = 0;
p2D[1] = h_aff2D[0], p2D[2] = h_aff2D[1], p2D[3] = h_aff2D[2];
p2D[4] = h_aff2D[3], p2D[5] = h_aff2D[4], p2D[6] = h_aff2D[5];
for (int i = 1; i <= DIM2D; i++)
for (int j = 1; j <= DIM2D; j++)
xi2D[i][j] = (i == j ? 1.0 : 0.0);
float meanValue = (float)sumcpu(h_img1, totalSize1) / totalSize1;
addvaluecpu(h_imgT, h_img1, -meanValue, totalSize1);
multicpu(h_reg, h_imgT, h_imgT, totalSize1);
double sumSqrA = sumcpu(h_reg, totalSize1);
valueStatic2D = float(sqrt(sumSqrA));
if (valueStatic2D == 0) {
fprintf(stderr, "*** SD of image 1 is zero, empty image input **** \n");
exit(1);
}
hipMemcpy(d_img2D, h_imgT, totalSize1 * sizeof(float), hipMemcpyHostToDevice);
meanValue = (float)sumcpu(h_img2, totalSize2) / totalSize2;
addvaluecpu(h_imgT, h_img2, -meanValue, totalSize2);
hipMemcpyToArray(d_Array2D, 0, 0, h_imgT, totalSize2 * sizeof(float), hipMemcpyHostToDevice);
BindTexture2D(d_Array2D, channelDesc2D);
cudaCheckErrors("****Fail to bind 2D texture!!!!*****\n");
itNumStatic2D = 0;
ctime1 = clock();
records[1] = -costfunc2D(p2D);
ctime2 = clock();
if (affMethod > 0) {
powell(p2D, xi2D, DIM2D, FTOL, &iter, &fret, costfunc2D, &itNumStatic2D, itLimit);
memcpy(iTmx, h_aff2D, DIM2D * sizeof(float));
}
	UnbindTexture2D();
ctime3 = clock();
hipMemcpyToArray(d_Array2D, 0, 0, h_img2, totalSize2 * sizeof(float), hipMemcpyHostToDevice);
BindTexture2D(d_Array2D, channelDesc2D);
affineTransform2D(d_img2D, imx2D1, imy2D1, imx2D2, imy2D2);
	UnbindTexture2D();
hipMemcpy(h_reg, d_img2D, totalSize1 * sizeof(float), hipMemcpyDeviceToHost);
records[3] = -fret;
records[4] = (float)(ctime2 - ctime1);
records[5] = itNumStatic2D;
records[6] = (float)(ctime3 - ctime2) / CLOCKS_PER_SEC;
free(p2D);
free(h_aff2D);
free_matrix(xi2D, 1, DIM2D, 1, DIM2D);
free(h_imgT);
hipFree(d_img2D);
hipFreeArray(d_Array2D);
end = clock();
records[7] = (float)(end - start) / CLOCKS_PER_SEC;
return 0;
}
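// Usage sketch for the GPU 2D registration (hypothetical host-side names; the FTOL and
// itLimit values are only example choices): h_img1 (sx x sy) is the fixed image,
// h_img2 (sx2 x sy2) the moving one, and iTmx a 6-element 2x3 affine matrix.
//   float tmx2d[6] = { 1, 0, 0,   0, 1, 0 };   // identity, used only when flagTmx is true
//   float rec[8] = { 0 };
//   reg2d_affine1(h_reg, tmx2d, h_img1, h_img2, sx, sy, sx2, sy2,
//                 1 /* affMethod > 0 runs the Powell search */, false, 0.0001f, 3000, rec);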
extern "C"
int affinetrans3d0(float *h_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
// cpu
return 0;
}
extern "C"
int affinetrans3d1(float *d_odata, float *iTmx, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipArray *d_ArrayTemp;
hipMalloc3DArray(&d_ArrayTemp, &channelDesc, make_hipExtent(sx2, sy2, sz2));
hipDeviceSynchronize();
cudaCheckErrors("****GPU array memory allocating fails... GPU out of memory !!!!*****\n");
cudacopydevicetoarray(d_ArrayTemp, channelDesc, d_idata, sx2, sy2, sz2);
BindTexture(d_ArrayTemp, channelDesc);
CopyTranMatrix(iTmx, NDIM * sizeof(float));
affineTransform(d_odata, sx, sy, sz, sx2, sy2, sz2);
UnbindTexture();
hipFreeArray(d_ArrayTemp);
return 0;
}
extern "C"
int affinetrans3d2(float *d_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipArray *d_ArrayTemp;
hipMalloc3DArray(&d_ArrayTemp, &channelDesc, make_hipExtent(sx2, sy2, sz2));
hipDeviceSynchronize();
cudaCheckErrors("****GPU array memory allocating fails... GPU out of memory !!!!*****\n");
cudacopyhosttoarray(d_ArrayTemp, channelDesc, h_idata, sx2, sy2, sz2);
BindTexture(d_ArrayTemp, channelDesc);
CopyTranMatrix(iTmx, NDIM * sizeof(float));
affineTransform(d_odata, sx, sy, sz, sx2, sy2, sz2);
UnbindTexture();
hipFreeArray(d_ArrayTemp);
return 0;
}
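// Usage sketch for the 3D affine transform entry points (d_out, d_in, h_in are hypothetical
// caller-side names; a 12-element row-major 3x4 matrix is assumed, which matches how the
// identity is initialized in reg3d_affine1 below):
//   float tmx[12] = { 1, 0, 0, 0,   0, 1, 0, 0,   0, 0, 1, 0 };   // identity transform
//   affinetrans3d1(d_out, tmx, d_in, sx, sy, sz, sx2, sy2, sz2);  // source already on the GPU
//   affinetrans3d2(d_out, tmx, h_in, sx, sy, sz, sx2, sy2, sz2);  // source still on the host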
float costfunc(float *x) {
if (dof9Flag) {
dof9tomatrix(affCoef, x, dofNum);
}
else {
p2matrix(affCoef, x);
}
float costValue = corrfunc(d_imgStatic, valueStatic, affCoef, sxStatic1, syStatic1, szStatic1, sxStatic2, syStatic2, szStatic2);
itNumStatic += 1;
return -costValue;
}
float costfunccpu(float *x) { // **** this function does not work correctly
if (dof9Flag) {
dof9tomatrix(affCoef, x, dofNum);
}
else {
p2matrix(affCoef, x);
}
double costValue = corrfunccpu2(h_s3D, h_t3D, affCoef, sxStatic1, syStatic1, szStatic1, sxStatic2, syStatic2, szStatic2);
itNumStatic += 1;
return (float)(-costValue / valueStatic);
}
extern "C"
float zncc0(float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz) {
return 0;
}
extern "C"
float zncc1(float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz) {
	// note: d_img1 and d_img2 are modified during the calculation
float znccValue = -2.0;
long long int totalSize = sx*sy*sz;
float *d_imgT = NULL;
hipMalloc((void **)&d_imgT, totalSize * sizeof(float));
cudaCheckErrors("****GPU memory allocating fails... GPU out of memory !!!!*****\n");
double sumImg1 = 0, sumImg2 = 0, sumST = 0, sumSS = 0, sumTT = 0;
sumImg1 = sum3Dgpu(d_img1, sx, sy, sz);
sumImg2 = sum3Dgpu(d_img2, sx, sy, sz);
addvaluegpu(d_img1, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
addvaluegpu(d_img2, d_img2, -float(sumImg2) / float(totalSize), sx, sy, sz);
multi3Dgpu(d_imgT, d_img1, d_img2, sx, sy, sz);
sumST = sum3Dgpu(d_imgT, sx, sy, sz);
multi3Dgpu(d_imgT, d_img1, d_img1, sx, sy, sz);
sumTT = sum3Dgpu(d_imgT, sx, sy, sz);
multi3Dgpu(d_imgT, d_img2, d_img2, sx, sy, sz);
sumSS = sum3Dgpu(d_imgT, sx, sy, sz);
hipFree(d_imgT);
float b = float(sqrt(sumTT*sumSS));
if (b != 0)
znccValue = sumST / b;
return znccValue;
}
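// For reference, zncc1/zncc2 evaluate the zero-normalized cross-correlation
//   ZNCC = sum((a - mean(a)) * (b - mean(b)))
//          / sqrt( sum((a - mean(a))^2) * sum((b - mean(b))^2) ),
// returning -2.0 as a sentinel when either image has zero variance.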
extern "C"
float zncc2(float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz) {
	// note: d_img1 and d_img2 are modified during the calculation
float znccValue = -2.0;
long long int totalSize = sx*sy*sz;
double sumImg1 = 0, sumImg2 = 0, sumST = 0, sumSS = 0, sumTT = 0;
float *h_img1 = (float *)malloc(totalSize * sizeof(float));
sumImg1 = sum3Dgpu(d_img1, sx, sy, sz);
sumImg2 = sum3Dgpu(d_img2, sx, sy, sz);
addvaluegpu(d_img1, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
addvaluegpu(d_img2, d_img2, -float(sumImg2) / float(totalSize), sx, sy, sz);
hipMemcpy(h_img1, d_img1, totalSize * sizeof(float), hipMemcpyDeviceToHost);
multi3Dgpu(d_img1, d_img1, d_img1, sx, sy, sz);
sumTT = sum3Dgpu(d_img1, sx, sy, sz);
hipMemcpy(d_img1, h_img1, totalSize * sizeof(float), hipMemcpyHostToDevice);
multi3Dgpu(d_img1, d_img1, d_img2, sx, sy, sz);
sumST = sum3Dgpu(d_img1, sx, sy, sz);
multi3Dgpu(d_img2, d_img2, d_img2, sx, sy, sz);
sumSS = sum3Dgpu(d_img2, sx, sy, sz);
free(h_img1);
float b = float(sqrt(sumTT*sumSS));
if (b != 0)
znccValue = sumST / b;
return znccValue;
}
extern "C"
extern "C"
int reg3d_phasor0(long long int *shiftXYZ, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz) {
return 0;
}
extern "C"
int reg3d_phasor1(long long int *shiftXYZ, float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz) {
	long long int totalSize = sx * sy * sz;
	long long int totalSizeSpectrum = sz * sy * (sx / 2 + 1); // in complex floating format
fComplex *d_Spectrum1 = NULL, *d_Spectrum2 = NULL;
hipMalloc((void **)&d_Spectrum1, totalSizeSpectrum * sizeof(fComplex));
hipMalloc((void **)&d_Spectrum2, totalSizeSpectrum * sizeof(fComplex));
hipfftHandle
fftPlanFwd,
fftPlanInv;
hipfftPlan3d(&fftPlanFwd, sz, sy, sx, HIPFFT_R2C);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_img1, (hipfftComplex *)d_Spectrum2);
conj3Dgpu(d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_img2, (hipfftComplex *)d_Spectrum2);
// multiplication and normalization
multicomplexnorm3Dgpu(d_Spectrum2, d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
hipfftDestroy(fftPlanFwd);
hipfftPlan3d(&fftPlanInv, sz, sy, sx, HIPFFT_C2R);
float *d_phasor1 = (float *)d_Spectrum1;
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_Spectrum2, (hipfftReal *)d_phasor1);
hipfftDestroy(fftPlanInv);
size_t corXYZ[3];
float *d_phasor2 = (float *)d_Spectrum2;
circshiftgpu(d_phasor2, d_phasor1, sx, sy, sz, round(sx / 2), round(sy / 2), round(sz / 2));
float peakValue = max3Dgpu(&corXYZ[0], d_phasor2, sx, sy, sz);
shiftXYZ[0] = corXYZ[0] - sx / 2;
shiftXYZ[1] = corXYZ[1] - sy / 2;
shiftXYZ[2] = corXYZ[2] - sz / 2;
hipFree(d_Spectrum1);
hipFree(d_Spectrum2);
// compare 8 cases based on cross-correlation
long long int shiftX = shiftXYZ[0];
long long int shiftY = shiftXYZ[1];
long long int shiftZ = shiftXYZ[2];
long long int xabs = abs(shiftX), yabs = abs(shiftY), zabs = abs(shiftZ);
	long long int beta = 4; // threshold: run the cross-correlation check only if the shift exceeds 1/beta of the image size
	if ((xabs > (sx / beta)) || (yabs > (sy / beta)) || (zabs > (sz / beta))) {
float *d_imgT = NULL, *d_crop1 = NULL, *d_crop2 = NULL;
long long int sizex1, sizex2, sizey1, sizey2, sizez1, sizez2, sizex, sizey, sizez, sizeMaxCrop;
sizex1 = xabs * sy * sz; sizex2 = (sx - xabs) * sy * sz;
sizey1 = sx *yabs * sz; sizey2 = sx * (sy - yabs) * sz;
sizez1 = sx * sy * zabs; sizez2 = sx * sy * (sz - zabs);
sizex = (sizex1 > sizex2) ? sizex1 : sizex2;
sizey = (sizey1 > sizey2) ? sizey1 : sizey2;
sizez = (sizez1 > sizez2) ? sizez1 : sizez2;
sizeMaxCrop = (sizex > sizey) ? sizex : sizey;
sizeMaxCrop = (sizeMaxCrop > sizez) ? sizeMaxCrop : sizez;
hipMalloc((void **)&d_imgT, totalSize * sizeof(float));
hipMalloc((void **)&d_crop1, sizeMaxCrop * sizeof(float));
hipMalloc((void **)&d_crop2, sizeMaxCrop * sizeof(float));
circshiftgpu(d_imgT, d_img2, sx, sy, sz, -shiftX, -shiftY, -shiftZ);
		// encode the 8 cases as a for loop
long long int imSizeCropx[2], imSizeCropy[2], imSizeCropz[2];
long long int imox[2], imoy[2], imoz[2];
// index 0 records original shifts, index 1 switches the shift to the opposite case.
imSizeCropx[0] = sx - xabs; imSizeCropx[1] = xabs;
if (shiftX > 0) {
imox[0] = 0; imox[1] = sx - xabs;
}
else {
imox[0] = xabs; imox[1] = 0;
}
imSizeCropy[0] = sy - yabs; imSizeCropy[1] = yabs;
if (shiftY > 0) {
imoy[0] = 0; imoy[1] = sy - yabs;
}
else {
imoy[0] = yabs; imoy[1] = 0;
}
imSizeCropz[0] = sz - zabs; imSizeCropz[1] = zabs;
if (shiftZ > 0) {
imoz[0] = 0; imoz[1] = sz - zabs;
}
else {
imoz[0] = zabs; imoz[1] = 0;
}
int indx = 0, indy = 0, indz = 0;
float ccMax = -3, ccNow = 0;
for (int i = 0; i < 2; i++) {
if (imSizeCropx[i] > (sx / beta)) {
for (int j = 0; j < 2; j++) {
if (imSizeCropy[j] > (sy / beta)) {
for (int k = 0; k < 2; k++) {
if (imSizeCropz[k] > (sz / beta)) {
cropgpu2(d_crop1, d_img1, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
cropgpu2(d_crop2, d_imgT, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
ccNow = zncc1(d_crop1, d_crop2, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k]);
if (ccMax < ccNow) {
ccMax = ccNow;
indx = i;
indy = j;
indz = k;
}
}
}
}
}
}
}
// if ind ==1, flip the coordinates
if (indx == 1) {
if (shiftX > 0)
shiftXYZ[0] = shiftX - sx;
else
shiftXYZ[0] = shiftX + sx;
}
if (indy == 1) {
if (shiftY > 0)
shiftXYZ[1] = shiftY - sy;
else
shiftXYZ[1] = shiftY + sy;
}
if (indz == 1) {
if (shiftZ > 0)
shiftXYZ[2] = shiftZ - sz;
else
shiftXYZ[2] = shiftZ + sz;
}
hipFree(d_imgT);
hipFree(d_crop1);
hipFree(d_crop2);
}
return 0;
}
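// Usage sketch for the phase-correlation (phasor) registration (hypothetical caller-side
// names; both volumes are on the GPU and share the same size):
//   long long int shift[3] = { 0, 0, 0 };
//   reg3d_phasor1(shift, d_img1, d_img2, sx, sy, sz);
//   // shift[0..2] now hold the x/y/z translation; applying circshiftgpu with the negated
//   // shift to d_img2, as done internally for the cross-correlation check, aligns it to d_img1.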
extern "C"
int reg3d_phasor2(long long int *shiftXYZ, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz) {
	long long int totalSize = sx * sy * sz;
	long long int totalSizeSpectrum = sz * sy * (sx / 2 + 1); // in complex floating format
fComplex *d_Spectrum1 = NULL, *d_Spectrum2 = NULL;
hipMalloc((void **)&d_Spectrum1, totalSizeSpectrum * sizeof(fComplex));
hipMalloc((void **)&d_Spectrum2, totalSizeSpectrum * sizeof(fComplex));
float *d_img = (float *)d_Spectrum1;
fComplex *h_Spectrum1 = (fComplex *)malloc(totalSizeSpectrum * sizeof(fComplex));
hipfftHandle
fftPlanFwd,
fftPlanInv;
hipfftPlan3d(&fftPlanFwd, sz, sy, sx, HIPFFT_R2C);
hipMemcpy(d_img, h_img1, totalSize * sizeof(float), hipMemcpyHostToDevice);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_img, (hipfftComplex *)d_Spectrum2);
conj3Dgpu(d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
hipMemcpy(h_Spectrum1, d_Spectrum1, totalSizeSpectrum * sizeof(fComplex), hipMemcpyDeviceToHost);
hipMemcpy(d_img, h_img2, totalSize * sizeof(float), hipMemcpyHostToDevice);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_img, (hipfftComplex *)d_Spectrum2);
// multiplication and normalization
hipMemcpy(d_Spectrum1, h_Spectrum1, totalSizeSpectrum * sizeof(fComplex), hipMemcpyHostToDevice);
multicomplexnorm3Dgpu(d_Spectrum2, d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
hipfftDestroy(fftPlanFwd);
hipfftPlan3d(&fftPlanInv, sz, sy, sx, HIPFFT_C2R);
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_Spectrum2, (hipfftReal *)d_img);
hipfftDestroy(fftPlanInv);
size_t corXYZ[3];
float *d_phasor2 = (float *)d_Spectrum2;
circshiftgpu(d_phasor2, d_img, sx, sy, sz, round(sx / 2), round(sy / 2), round(sz / 2));
float peakValue = max3Dgpu(&corXYZ[0], d_phasor2, sx, sy, sz);
shiftXYZ[0] = corXYZ[0] - sx / 2;
shiftXYZ[1] = corXYZ[1] - sy / 2;
shiftXYZ[2] = corXYZ[2] - sz / 2;
hipFree(d_Spectrum1);
hipFree(d_Spectrum2);
// compare 8 cases based on cross-correlation
long long int shiftX = shiftXYZ[0];
long long int shiftY = shiftXYZ[1];
long long int shiftZ = shiftXYZ[2];
long long int xabs = abs(shiftX), yabs = abs(shiftY), zabs = abs(shiftZ);
	long long int beta = 4; // threshold: run the cross-correlation check only if the shift exceeds 1/beta of the image size
	if ((xabs > (sx / beta)) || (yabs > (sy / beta)) || (zabs > (sz / beta))) {
float *d_img1 = NULL, *d_imgT = NULL, *d_crop1 = NULL, *d_crop2 = NULL;
long long int sizex1, sizex2, sizey1, sizey2, sizez1, sizez2, sizex, sizey, sizez, sizeMaxCrop;
sizex1 = xabs * sy * sz; sizex2 = (sx - xabs) * sy * sz;
sizey1 = sx *yabs * sz; sizey2 = sx * (sy - yabs) * sz;
sizez1 = sx * sy * zabs; sizez2 = sx * sy * (sz - zabs);
sizex = (sizex1 > sizex2) ? sizex1 : sizex2;
sizey = (sizey1 > sizey2) ? sizey1 : sizey2;
sizez = (sizez1 > sizez2) ? sizez1 : sizez2;
sizeMaxCrop = (sizex > sizey) ? sizex : sizey;
sizeMaxCrop = (sizeMaxCrop > sizez) ? sizeMaxCrop : sizez;
hipMalloc((void **)&d_img1, totalSize * sizeof(float));
hipMalloc((void **)&d_imgT, totalSize * sizeof(float));
hipMalloc((void **)&d_crop1, sizeMaxCrop * sizeof(float));
hipMalloc((void **)&d_crop2, sizeMaxCrop * sizeof(float));
hipMemcpy(d_img1, h_img2, totalSize * sizeof(float), hipMemcpyHostToDevice);
circshiftgpu(d_imgT, d_img1, sx, sy, sz, -shiftX, -shiftY, -shiftZ);
hipMemcpy(d_img1, h_img1, totalSize * sizeof(float), hipMemcpyHostToDevice);
		// encode the 8 cases as a for loop
long long int imSizeCropx[2], imSizeCropy[2], imSizeCropz[2];
long long int imox[2], imoy[2], imoz[2];
// index 0 records original shifts, index 1 switches the shift to the opposite case.
imSizeCropx[0] = sx - xabs; imSizeCropx[1] = xabs;
if (shiftX > 0) {
imox[0] = 0; imox[1] = sx - xabs;
}
else {
imox[0] = xabs; imox[1] = 0;
}
imSizeCropy[0] = sy - yabs; imSizeCropy[1] = yabs;
if (shiftY > 0) {
imoy[0] = 0; imoy[1] = sy - yabs;
}
else {
imoy[0] = yabs; imoy[1] = 0;
}
imSizeCropz[0] = sz - zabs; imSizeCropz[1] = zabs;
if (shiftZ > 0) {
imoz[0] = 0; imoz[1] = sz - zabs;
}
else {
imoz[0] = zabs; imoz[1] = 0;
}
int indx = 0, indy = 0, indz = 0;
float ccMax = -3, ccNow = 0;
for (int i = 0; i < 2; i++) {
if (imSizeCropx[i] >(sx / beta)) {
for (int j = 0; j < 2; j++) {
if (imSizeCropy[j] >(sy / beta)) {
for (int k = 0; k < 2; k++) {
if (imSizeCropz[k] >(sz / beta)) {
cropgpu2(d_crop1, d_img1, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
cropgpu2(d_crop2, d_imgT, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
ccNow = zncc1(d_crop1, d_crop2, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k]);
if (ccMax < ccNow) {
ccMax = ccNow;
indx = i;
indy = j;
indz = k;
}
}
}
}
}
}
}
// if ind ==1, flip the coordinates
if (indx == 1) {
if (shiftX > 0)
shiftXYZ[0] = shiftX - sx;
else
shiftXYZ[0] = shiftX + sx;
}
if (indy == 1) {
if (shiftY > 0)
shiftXYZ[1] = shiftY - sy;
else
shiftXYZ[1] = shiftY + sy;
}
if (indz == 1) {
if (shiftZ > 0)
shiftXYZ[2] = shiftZ - sz;
else
shiftXYZ[2] = shiftZ + sz;
}
hipFree(d_img1);
hipFree(d_imgT);
hipFree(d_crop1);
hipFree(d_crop2);
}
return 0;
}
int reg3d_affine0(float *h_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz,
int affMethod, bool flagTmx, float FTOL, int itLimit, bool verbose, float *records) {
return 0;
}
extern "C"
int reg3d_affine1(float *d_reg, float *iTmx, float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz,
int affMethod, bool flagTmx, float FTOL, int itLimit, bool verbose, float *records) {
// **** affine registration when GPU memory is sufficient: 3 images + 1 cuda array ***
/*
*** affine registration method:
0: no registration, transform d_img2 based on input matrix;
1: translation only;
2: rigid body;
	3: 7 degrees of freedom (translation, rotation, scaling equally in 3 dimensions);
	4: 9 degrees of freedom (translation, rotation, scaling);
5: 12 degrees of freedom;
6: rigid body first, then do 12 degrees of freedom;
7: 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
*** flagTmx:
true: use iTmx as input matrix;
false: default;
*** records: 8 element array
	[1]-[3]: initial ZNCC (zero-normalized cross-correlation, negative of the cost function), intermediate ZNCC, optimized ZNCC;
	[4]-[7]: single sub iteration time (in ms), total number of sub iterations, iteration time (in s), whole registration time (in s);
*/
// ************get basic input images information ******************
// image size
sxStatic1 = sx; syStatic1 = sy; szStatic1 = sz;
sxStatic2 = sx; syStatic2 = sy; szStatic2 = sz;
// total pixel count for each image
long long int totalSize = sx*sy*sz;
// ****************** Processing Starts*****************//
// variables for memory and time cost records
clock_t ctime0, ctime1, ctime2, ctime3, ctime4;
ctime0 = clock();
// *** no registration
if (affMethod == 0) {
if (flagTmx)
(void)affinetrans3d1(d_reg, iTmx, d_img2, sx, sy, sz, sx, sy, sz);
else {
hipMemcpy(d_reg, d_img2, totalSize * sizeof(float), hipMemcpyDeviceToDevice);
for (int j = 0; j < NDIM; j++) iTmx[j] = 0;
iTmx[0] = iTmx[5] = iTmx[10] = 1;
}
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...no registration performed!\n");
}
return 0;
}
// *** registration
// for powell searching
affCoef = (float *)malloc((NDIM) * sizeof(float));
float *affCoefInitial = (float *)malloc((NDIM) * sizeof(float));
static float *p = (float *)malloc((NDIM + 1) * sizeof(float));
int iter;
float fret, **xi;
xi = matrix(1, NDIM, 1, NDIM);
for (int i = 1; i <= NDIM; i++)
for (int j = 1; j <= NDIM; j++)
xi[i][j] = (i == j ? 1.0 : 0.0);
for (int j = 0; j < NDIM; j++) affCoefInitial[j] = 0;
affCoefInitial[0] = 1;
affCoefInitial[5] = 1;
affCoefInitial[10] = 1;
float *affCoefTemp = (float *)malloc((NDIM) * sizeof(float));
float **xi_dof9;
static float *p_dof9 = (float *)malloc((10) * sizeof(float));
xi_dof9 = matrix(1, 9, 1, 9);
// **** allocate memory for the images:
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipArray *d_Array;
// *****************************************************
// ************** Start processing ******************
double
sumImg1 = 0,
sumImg2 = 0,
sumSqr1 = 0;
	// ****** the definition of the 12 DOF coefficients is totally different from that of 3 DOF, 6 DOF, 7 DOF or 9 DOF;
	// if related to 3 DOF, 6 DOF, 7 DOF or 9 DOF (i.e. affMethod = 1, 2, 3, 4, 6, 7),
	// then perform an initial affine transformation based on the input matrix
// *initialize transformation matrix
if (flagTmx) {
if (affMethod == 5) {
// use input matrix as initialization if inputTmx is true
memcpy(affCoefInitial, iTmx, NDIM * sizeof(float));
}
else {
// make affine transformation
(void)affinetrans3d1(d_reg, iTmx, d_img2, sx, sy, sz, sx, sy, sz);
}
}
	if (affMethod != 5) {
		for (int i = 1; i <= 9; i++)
			for (int j = 1; j <= 9; j++)
				xi_dof9[i][j] = (i == j ? 1.0 : 0.0);
p_dof9[0] = 0;
p_dof9[1] = 0; p_dof9[2] = 0; p_dof9[3] = 0;
p_dof9[4] = 0; p_dof9[5] = 0; p_dof9[6] = 0;
p_dof9[7] = 1; p_dof9[8] = 1; p_dof9[9] = 1;
}
// *** preprocess source image
	if ((flagTmx) && (affMethod != 5)) { // based on the transformed image
sumImg2 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
	else { // based on the input d_img2
sumImg2 = sum3Dgpu(d_img2, sx, sy, sz);
addvaluegpu(d_reg, d_img2, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
// transfer source image into GPU array (later converted to texture memory)
hipMalloc3DArray(&d_Array, &channelDesc, make_hipExtent(sx, sy, sz));
cudaCheckErrors("****GPU memory allocating fails... GPU out of memory !!!!*****\n");
cudacopydevicetoarray(d_Array, channelDesc, d_reg, sx, sy, sz);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 2 is zero, empty image input or empty image after initial transformation **** \n");
exit(1);
}
// *** preprocess target image
sumImg1 = sum3Dgpu(d_img1, sx, sy, sz);
addvaluegpu(d_reg, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 1 is zero, empty image input **** \n");
exit(1);
}
addvaluegpu(d_reg, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
cudaCheckErrors("****Image preprocessing fails...");
	// *** 3D registration begins
// Create 3D texture for source image
BindTexture(d_Array, channelDesc);
	// use the target image as the static reference
d_imgStatic = d_reg;
// calculate initial cost function value and time cost for each sub iteration
ctime1 = clock();
dof9Flag = false;
matrix2p(affCoefInitial, p);
ctime2 = clock();
records[1] = -costfunc(p);
records[4] = (float)(ctime2 - ctime1);
if (verbose) {
printf("...initial cross correlation value: %f;\n", records[1]);
printf("...time cost for single sub iteration: %f ms;\n", records[4]);
}
itNumStatic = 0;
switch (affMethod) {
case 1:
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 2:
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 3:
dof9Flag = true;
dofNum = 7;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 4:
dof9Flag = true;
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 5:
dof9Flag = false;
dofNum = 12;
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 6:
// do 6 DOF --> 12 DOF
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 7:
// do 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 3 DOF: %f;\n", -fret);
}
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, 0.005, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 9 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
default:
printf("\n ****Wrong affine registration method is setup, no registraiton performed !!! **** \n");
}
if ((flagTmx) && (affMethod != 5)) {
matrixmultiply(affCoefTemp, iTmx, affCoef); //final transformation matrix
memcpy(affCoef, affCoefTemp, NDIM * sizeof(float));
}
UnbindTexture();
memcpy(iTmx, affCoef, NDIM * sizeof(float));
ctime3 = clock();
records[3] = -fret; // negative of the minimized cost function value
records[5] = (float)itNumStatic;
records[6] = (float)(ctime3 - ctime2) / CLOCKS_PER_SEC;
if (verbose) {
printf("...optimized cross correlation value: %f;\n", records[3]);
printf("...total sub iteration number: %d;\n", int(records[5]));
printf("...time cost for all iterations: %f s;\n", records[6]);
}
// ****Perform affine transformation with optimized coefficients****//
cudacopydevicetoarray(d_Array, channelDesc, d_img2, sx, sy, sz);
BindTexture(d_Array, channelDesc);
CopyTranMatrix(affCoef, NDIM * sizeof(float));
affineTransform(d_reg, sx, sy, sz, sx, sy, sz);
UnbindTexture();
free(affCoefTemp);
free(p_dof9);
free_matrix(xi_dof9, 1, 9, 1, 9);
free(affCoef);
free(affCoefInitial);
free(p);
free_matrix(xi, 1, NDIM, 1, NDIM);
//free GPU variables
hipFreeArray(d_Array);
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...time cost for registration: %f s;\n", records[7]);
}
return 0;
}
extern "C"
int reg3d_affine2(float *d_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz,
int affMethod, bool flagTmx, float FTOL, int itLimit, bool verbose, float *records) {
// **** affine registration when GPU memory is insufficient: 1 image + 1 cuda array ***
/*
*** affine registration method:
0: no registration, transform d_img2 based on input matrix;
1: translation only;
2: rigid body;
3: 7 degrees of freedom (translation, rotation, scaling equally in 3 dimensions)
4: 9 degrees of freedom(translation, rotation, scaling);
5: 12 degrees of freedom;
6: rigid body first, then do 12 degrees of freedom;
7: 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
*** flagTmx:
true: use iTmx as input matrix;
false: default;
*** records: 8-element array
[1]-[3]: initial ZNCC (zero-normalized cross-correlation, negative of the cost function), intermediate ZNCC, optimized ZNCC;
[4]-[7]: single sub-iteration time (in ms), total number of sub-iterations, iteration time (in s), whole registration time (in s);
*/
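/* Illustrative host-side usage sketch (not from the original library; the image size,
   FTOL and itLimit values below are example assumptions):

   long long int sx = 512, sy = 512, sz = 100;      // volume dimensions (example values)
   float *h_img1, *h_img2;                          // target / source volumes, sx*sy*sz floats each, loaded elsewhere
   float *d_reg = NULL;
   hipMalloc((void **)&d_reg, sx * sy * sz * sizeof(float));
   float iTmx[12] = { 1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0 }; // identity transform
   float records[8] = { 0 };
   // affMethod = 7: 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF; flagTmx = false: do not use iTmx as input
   reg3d_affine2(d_reg, iTmx, h_img1, h_img2, sx, sy, sz, 7, false, 0.0001f, 3000, true, records);
   // on return: iTmx holds the optimized transform, d_reg the registered source image,
   // and records[1]-[7] the ZNCC values and timing information described above.
*/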
// ************get basic input images information ******************
// image size
sxStatic1 = sx; syStatic1 = sy; szStatic1 = sz;
sxStatic2 = sx; syStatic2 = sy; szStatic2 = sz;
// total pixel count for each image
long long int totalSize = sx*sy*sz;
// ****************** Processing Starts*****************//
// variables for memory and time cost records
clock_t ctime0, ctime1, ctime2, ctime3, ctime4;
ctime0 = clock();
// *** no registration
if (affMethod == 0) {
if (flagTmx)
(void)affinetrans3d2(d_reg, iTmx, h_img2, sx, sy, sz, sx, sy, sz);
else {
hipMemcpy(d_reg, h_img2, totalSize * sizeof(float), hipMemcpyHostToDevice);
for (int j = 0; j < NDIM; j++) iTmx[j] = 0;
iTmx[0] = iTmx[5] = iTmx[10] = 1;
}
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...no registration performed!\n");
}
return 0;
}
// *** registration
// for powell searching
affCoef = (float *)malloc((NDIM) * sizeof(float));
float *affCoefInitial = (float *)malloc((NDIM) * sizeof(float));
static float *p = (float *)malloc((NDIM + 1) * sizeof(float));
int iter;
float fret, **xi;
xi = matrix(1, NDIM, 1, NDIM);
for (int i = 1; i <= NDIM; i++)
for (int j = 1; j <= NDIM; j++)
xi[i][j] = (i == j ? 1.0 : 0.0);
for (int j = 0; j < NDIM; j++) affCoefInitial[j] = 0;
affCoefInitial[0] = 1;
affCoefInitial[5] = 1;
affCoefInitial[10] = 1;
float *affCoefTemp = (float *)malloc((NDIM) * sizeof(float));
float **xi_dof9;
static float *p_dof9 = (float *)malloc((10) * sizeof(float));
xi_dof9 = matrix(1, 9, 1, 9);
// **** allocate memory for the images:
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipArray *d_Array;
float *h_imgTemp = (float *)malloc(totalSize * sizeof(float));
// *****************************************************
// ************** Start processing ******************
double
sumImg1 = 0,
sumImg2 = 0,
sumSqr1 = 0;
// ****** the definition of the 12 DOF coefficients is totally different from that of 3 DOF, 6 DOF, 7 DOF or 9 DOF;
// if related to 3 DOF, 6 DOF, 7 DOF or 9 DOF (i.e. affMethod = 1, 2, 3, 4, 6, 7)
// then perform the initial affine transformation based on the input matrix
// *initialize transformation matrix
if (flagTmx) {
if (affMethod == 5) {
// use input matrix as initialization if inputTmx is true
memcpy(affCoefInitial, iTmx, NDIM * sizeof(float));
}
else {
// make affine transformation
(void)affinetrans3d2(d_reg, iTmx, h_img2, sx, sy, sz, sx, sy, sz);
}
}
if (affMethod != 5) {
xi_dof9 = matrix(1, 9, 1, 9);
for (int i = 1; i <= 9; i++)
for (int j = 1; j <= 9; j++)
xi_dof9[i][j] = (i == j ? 1.0 : 0.0);
p_dof9[0] = 0;
p_dof9[1] = 0; p_dof9[2] = 0; p_dof9[3] = 0;
p_dof9[4] = 0; p_dof9[5] = 0; p_dof9[6] = 0;
p_dof9[7] = 1; p_dof9[8] = 1; p_dof9[9] = 1;
}
// *** preprocess source image
if ((flagTmx) && (affMethod != 5)) { // based on transformed image
sumImg2 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
else {//based on input d_img2
hipMemcpy(d_reg, h_img2, totalSize * sizeof(float), hipMemcpyHostToDevice);
sumImg2 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
// transfer source image into GPU array (later converted to texture memory)
hipMalloc3DArray(&d_Array, &channelDesc, make_hipExtent(sx, sy, sz));
cudaCheckErrors("****GPU memory allocating fails... GPU out of memory !!!!*****\n");
cudacopydevicetoarray(d_Array, channelDesc, d_reg, sx, sy, sz);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 2 is zero, empty image input or empty image after initial transformation **** \n");
exit(1);
}
// *** preprocess target image
hipMemcpy(d_reg, h_img1, totalSize * sizeof(float), hipMemcpyHostToDevice);
sumImg1 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg1) / float(totalSize), sx, sy, sz);
hipMemcpy(h_imgTemp, d_reg, totalSize * sizeof(float), hipMemcpyDeviceToHost);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 1 is zero, empty image input **** \n");
exit(1);
}
hipMemcpy(d_reg, h_imgTemp, totalSize * sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("****Image preprocessing fails...");
// *** 3D registration begins
// Create 3D texture for source image
BindTexture(d_Array, channelDesc);
// make target image as static
d_imgStatic = d_reg;
// calculate initial cost function value and time cost for each sub iteration
ctime1 = clock();
dof9Flag = false;
matrix2p(affCoefInitial, p);
ctime2 = clock();
records[1] = -costfunc(p);
records[4] = (float)(ctime2 - ctime1);
if (verbose) {
printf("...initial cross correlation value: %f;\n", records[1]);
printf("...time cost for single sub iteration: %f ms;\n", records[4]);
}
itNumStatic = 0;
switch (affMethod) {
case 1:
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 2:
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 3:
dof9Flag = true;
dofNum = 7;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 4:
dof9Flag = true;
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 5:
dof9Flag = false;
dofNum = 12;
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 6:
// do 6 DOF --> 12 DOF
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 7:
// do 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 3 DOF: %f;\n", -fret);
}
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, 0.005, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 9 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
default:
printf("\n ****Wrong affine registration method is setup, no registraiton performed !!! **** \n");
}
if ((flagTmx) && (affMethod != 5)) {
matrixmultiply(affCoefTemp, iTmx, affCoef); //final transformation matrix
memcpy(affCoef, affCoefTemp, NDIM * sizeof(float));
}
UnbindTexture();
memcpy(iTmx, affCoef, NDIM * sizeof(float));
ctime3 = clock();
records[3] = -fret; // negative of the minimized cost function value
records[5] = (float)itNumStatic;
records[6] = (float)(ctime3 - ctime2) / CLOCKS_PER_SEC;
if (verbose) {
printf("...optimized cross correlation value: %f;\n", records[3]);
printf("...total sub iteration number: %d;\n", int(records[5]));
printf("...time cost for all iterations: %f s;\n", records[6]);
}
// ****Perform affine transformation with optimized coefficients****//
cudacopyhosttoarray(d_Array, channelDesc, h_img2, sx, sy, sz);
BindTexture(d_Array, channelDesc);
CopyTranMatrix(affCoef, NDIM * sizeof(float));
affineTransform(d_reg, sx, sy, sz, sx, sy, sz);
UnbindTexture();
free(h_imgTemp);
free(affCoefTemp);
free(p_dof9);
free_matrix(xi_dof9, 1, 9, 1, 9);
free(affCoef);
free(affCoefInitial);
free(p);
free_matrix(xi, 1, NDIM, 1, NDIM);
//free GPU variables
hipFreeArray(d_Array);
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...time cost for registration: %f s;\n", records[7]);
}
return 0;
}
// Deconvolution
extern "C"
void genOTFgpu(fComplex *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, bool normFlag) {
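// Builds the OTF on the GPU: (1) optionally normalize the PSF so it sums to 1 (normFlag);
// (2) resize/pad the (sx2, sy2, sz2) PSF to the (sx, sy, sz) FFT grid via alignsize3Dgpu / padPSFgpu;
// (3) run a forward R2C FFT, leaving sx * sy * (sz / 2 + 1) complex OTF values in d_odata.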
long long int totalSizeIn = sx2 * sy2 * sz2;
long long int totalSizeOut = sx * sy * sz;
long long int totalSizeMax = (totalSizeIn > totalSizeOut)?totalSizeIn:totalSizeOut;
float *d_temp = NULL;
cudaStatus = hipMalloc((void **)&d_temp, totalSizeMax * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: GPU memory allocating error when calculating OTF \n");
exit(1);
}
if (normFlag) {
double sumValue = sum3Dgpu(d_idata, sx2, sy2, sz2);
multivaluegpu(d_temp, d_idata, (float)(1 / sumValue), sx2, sy2, sz2);
}
else
hipMemcpy(d_temp, d_idata, totalSizeIn * sizeof(float), hipMemcpyDeviceToDevice);
if ((sx<sx2) || (sy<sy2) || (sz<sz2)) {
alignsize3Dgpu((float *)d_odata, d_temp, sx, sy, sz, sx2, sy2, sz2);
padPSFgpu(d_temp, (float *)d_odata, sx, sy, sz, sx, sy, sz);
}
else {
padPSFgpu((float *)d_odata, d_temp, sx, sy, sz, sx2, sy2, sz2);
hipMemcpy(d_temp, d_odata, totalSizeOut * sizeof(float), hipMemcpyDeviceToDevice);
}
hipfftHandle
fftPlanFwd;
hipfftPlan3d(&fftPlanFwd, sx, sy, sz, HIPFFT_R2C);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_temp, (hipfftComplex *)d_odata);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error when calculating OTF \n");
exit(1);
}
hipFree(d_temp);
hipfftDestroy(fftPlanFwd);
}
extern "C"
int decon_singleview_OTF0(float *h_decon, float *h_img, fftwf_complex *h_OTF, fftwf_complex *h_OTF_bp,
long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** single view deconvolution with OTF interface on CPU ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
clock_t start, end;
start = clock();
float *h_StackA = h_img, *h_StackE = h_decon;
float *h_StackT = (float *)malloc(totalSize * sizeof(float));
fftwf_complex *h_StackESpectrum = (fftwf_complex *)malloc(totalSizeSpectrum * sizeof(fftwf_complex));
// clamp the measured image to a small positive value
maxvaluecpu(h_StackA, h_StackA, (float)(SMALLVALUE), totalSize);
// initialize estimation
if (flagConstInitial) { // use constant mean value as initial
float meanValue = (float)sumcpu(h_StackA, totalSize);
memset(h_StackE, 0, totalSize * sizeof(float));
addvaluecpu(h_StackE, h_StackE, meanValue, totalSize);
}
else { // use measured images as initial
memcpy(h_StackE, h_StackA, totalSize * sizeof(float));
}
fftwf_plan stackE2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackE, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan stackT2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackT, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan spectrum2StackT = fftwf_plan_dft_c2r_3d(sx, sy, sz, h_StackESpectrum, h_StackT, FFTW_MEASURE);
printf("...Start CPU Decon\n");
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
fftwf_execute(stackE2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
divcpu(h_StackT, h_StackA, h_StackT, totalSize);
fftwf_execute(stackT2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF_bp, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
multicpu(h_StackE, h_StackE, h_StackT, totalSize);//
}
free(h_StackT);
free(h_StackESpectrum);
fftwf_destroy_plan(stackE2Spectrum);
fftwf_destroy_plan(stackT2Spectrum);
fftwf_destroy_plan(spectrum2StackT);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_singleview_OTF1(float *d_decon, float *d_img, fComplex *d_OTF, fComplex *d_OTF_bp,
long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** single view deconvolution with OTF interface when GPU memory is sufficient ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
size_t freeMem = 0, totalMem = 0;
hipfftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *d_StackA = d_img, *d_StackE = d_decon;
float *d_StackT = NULL;
fComplex *d_StackESpectrum = NULL;
hipMalloc((void **)&d_StackT, totalSize * sizeof(float));
hipMalloc((void **)&d_StackESpectrum, totalSizeSpectrum * sizeof(fComplex));
// initialize estimation
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
if(flagConstInitial) {// use constant mean value as initial
float meanValue = (float)sum3Dgpu(d_StackA, sx, sy, sz);
hipMemset(d_StackE, 0, totalSize * sizeof(float));
addvaluegpu(d_StackE, d_StackE, meanValue, sx, sy, sz);
}
else { // use measured image as initial
hipMemcpy(d_StackE, d_StackA, totalSize * sizeof(float), hipMemcpyDeviceToDevice);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
// Create FFT plans
hipfftPlan3d(&fftPlanFwd, sx, sy, sz, HIPFFT_R2C);
hipfftPlan3d(&fftPlanInv, sx, sy, sz, HIPFFT_C2R);
hipMemGetInfo(&freeMem, &totalMem);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
// forward
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackE, (hipfftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackT);
div3Dgpu(d_StackT, d_StackA, d_StackT, sx, sy, sz);
// backward
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackT, (hipfftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackT);
multi3Dgpu(d_StackE, d_StackE, d_StackT, sx, sy, sz);
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz); // eliminate possible negative values
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
hipFree(d_StackT); hipFree(d_StackESpectrum);
hipfftDestroy(fftPlanFwd);
hipfftDestroy(fftPlanInv);
hipMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_singleview_OTF2(float *d_decon, float *d_img, fComplex *h_OTF, fComplex *h_OTF_bp,
long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** single view deconvolution with OTF interface when GPU memory is insufficient: 2 images + 2 fftPlans ***
// **** d_decon and d_img should be allocated with at least sx * sy * (sz / 2 + 1) * sizeof(fComplex) bytes, so they can also hold the image spectrum
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
// *****
size_t freeMem = 0, totalMem = 0;
hipfftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *h_StackA = NULL, *h_StackE = NULL;
h_StackA = (float *)malloc(totalSize * sizeof(float));
h_StackE = (float *)malloc(totalSize * sizeof(float));
float *d_StackA = d_img, *d_StackE = d_decon;
fComplex *d_OTF = NULL, *d_OTF_bp = NULL, *d_StackESpectrum = NULL;
// initialize estimation
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
hipMemcpy(h_StackA, d_StackA, totalSize * sizeof(float), hipMemcpyDeviceToHost);
//if (initialFlag) // use measured image as initial
if (flagConstInitial) { // use constant mean value as initial
float meanValue = (float)sum3Dgpu(d_StackA, sx, sy, sz);
hipMemset(d_StackA, 0, totalSize * sizeof(float));
addvaluegpu(d_StackA, d_StackA, meanValue, sx, sy, sz);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
hipMemcpy(h_StackE, d_StackA, totalSize * sizeof(float), hipMemcpyDeviceToHost);
d_OTF = (fComplex *)d_StackA; // share the same physical memory
d_OTF_bp = (fComplex *)d_StackA; // share the same physical memory
d_StackESpectrum = (fComplex *)d_StackE; // share the same physical memory
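// Memory-saving aliases: the real-space buffers are reinterpreted as complex spectra, and the
// OTFs are streamed from host memory inside the loop instead of being kept resident on the GPU;
// h_StackA / h_StackE hold the measured image and current estimate that these reuses overwrite.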
// Create FFT plans
hipfftPlan3d(&fftPlanFwd, sx, sy, sz, HIPFFT_R2C);
hipfftPlan3d(&fftPlanInv, sx, sy, sz, HIPFFT_C2R);
hipMemGetInfo(&freeMem, &totalMem);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
// forward
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackA, (hipfftComplex *)d_StackESpectrum);
hipMemcpy(d_OTF, h_OTF, totalSizeSpectrum * sizeof(fComplex), hipMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackA);
hipMemcpy(d_StackE, h_StackA, totalSize * sizeof(float), hipMemcpyHostToDevice);
div3Dgpu(d_StackA, d_StackE, d_StackA, sx, sy, sz);
// backward
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackA, (hipfftComplex *)d_StackESpectrum);
hipMemcpy(d_OTF_bp, h_OTF_bp, totalSizeSpectrum * sizeof(fComplex), hipMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackA);
hipMemcpy(d_StackE, h_StackE, totalSize * sizeof(float), hipMemcpyHostToDevice);
multi3Dgpu(d_StackA, d_StackE, d_StackA, sx, sy, sz);
maxvalue3Dgpu(d_StackA, d_StackA, float(SMALLVALUE), sx, sy, sz); // eliminate possible negative values
hipMemcpy(h_StackE, d_StackA, totalSize * sizeof(float), hipMemcpyDeviceToHost);
}
hipMemcpy(d_StackE, d_StackA, totalSize * sizeof(float), hipMemcpyDeviceToDevice);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
free(h_StackA); free(h_StackE);
hipfftDestroy(fftPlanFwd);
hipfftDestroy(fftPlanInv);
hipMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_dualview_OTF0(float *h_decon, float *h_img1, float *h_img2, fftwf_complex *h_OTF1, fftwf_complex *h_OTF2, fftwf_complex *h_OTF_bp1,
fftwf_complex *h_OTF_bp2, long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** dual-view deconvolution with OTF interface on CPU ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
clock_t start, end;
start = clock();
float *h_StackA = h_img1, *h_StackB = h_img2, *h_StackE = h_decon;
float *h_StackT = (float *)malloc(totalSize * sizeof(float));
fftwf_complex *h_StackESpectrum = (fftwf_complex *)malloc(totalSizeSpectrum * sizeof(fftwf_complex));
// initialize estimation
maxvaluecpu(h_StackA, h_StackA, (float)(SMALLVALUE), totalSize);
maxvaluecpu(h_StackB, h_StackB, (float)(SMALLVALUE), totalSize);
// initialize estimation
if (flagConstInitial) { // use constant mean value as initial
float meanValue1 = (float)sumcpu(h_StackA, totalSize);
float meanValue2 = (float)sumcpu(h_StackB, totalSize);
memset(h_StackE, 0, totalSize * sizeof(float));
addvaluecpu(h_StackE, h_StackE, (meanValue1 + meanValue2) / 2, totalSize);
}
else { // use measured images as initial
addcpu(h_StackE, h_StackA, h_StackB, totalSize);
multivaluecpu(h_StackE, h_StackE, (float)0.5, totalSize);
}
fftwf_plan stackE2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackE, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan stackT2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackT, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan spectrum2StackT = fftwf_plan_dft_c2r_3d(sx, sy, sz, h_StackESpectrum, h_StackT, FFTW_MEASURE);
printf("...Start CPU Decon\n");
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
fftwf_execute(stackE2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF1, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
//printf("here!\n");
divcpu(h_StackT, h_StackA, h_StackT, totalSize);
fftwf_execute(stackT2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF_bp1, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
multicpu(h_StackE, h_StackE, h_StackT, totalSize);//
// ### 2nd view
fftwf_execute(stackE2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF2, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
divcpu(h_StackT, h_StackB, h_StackT, totalSize);
fftwf_execute(stackT2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF_bp2, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
multicpu(h_StackE, h_StackE, h_StackT, totalSize);//
}
free(h_StackT);
free(h_StackESpectrum);
fftwf_destroy_plan(stackE2Spectrum);
fftwf_destroy_plan(stackT2Spectrum);
fftwf_destroy_plan(spectrum2StackT);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_dualview_OTF1(float *d_decon, float *d_img1, float *d_img2, fComplex *d_OTF1, fComplex *d_OTF2, fComplex *d_OTF_bp1,
fComplex *d_OTF_bp2, long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** dual-view deconvolution with OTF interface when GPU memory is sufficient: 9 images + 2 fftPlans ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
size_t freeMem = 0, totalMem = 0;
hipfftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *d_StackA = d_img1, *d_StackB = d_img2, *d_StackE = d_decon;
float *d_StackT = NULL;
fComplex *d_StackESpectrum = NULL;
hipMalloc((void **)&d_StackT, totalSize * sizeof(float));
hipMalloc((void **)&d_StackESpectrum, totalSizeSpectrum * sizeof(fComplex));
// initialize estimation
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
maxvalue3Dgpu(d_StackB, d_StackB, (float)(SMALLVALUE), sx, sy, sz);
// initialize estimation
if (flagConstInitial) { // use constant mean value as initial
float meanValue1 = (float)sum3Dgpu(d_StackA, sx, sy, sz);
float meanValue2 = (float)sum3Dgpu(d_StackB, sx, sy, sz);
hipMemset(d_StackE, 0, totalSize * sizeof(float));
addvaluegpu(d_StackE, d_StackE, (meanValue1 + meanValue2) / 2, sx, sy, sz);
}
else { // use measured images as initial
add3Dgpu(d_StackE, d_StackA, d_StackB, sx, sy, sz);
multivaluegpu(d_StackE, d_StackE, (float)0.5, sx, sy, sz);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
// Create FFT plans
hipfftPlan3d(&fftPlanFwd, sx, sy, sz, HIPFFT_R2C);
hipfftPlan3d(&fftPlanInv, sx, sy, sz, HIPFFT_C2R);
hipMemGetInfo(&freeMem, &totalMem);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
// ### 1st view
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackE, (hipfftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF1, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackT);
div3Dgpu(d_StackT, d_StackA, d_StackT, sx, sy, sz);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackT, (hipfftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp1, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackT);
multi3Dgpu(d_StackE, d_StackE, d_StackT, sx, sy, sz);//
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
// ### 2nd view
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackE, (hipfftComplex *)d_StackESpectrum);//
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF2, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackT);
div3Dgpu(d_StackT, d_StackB, d_StackT, sx, sy, sz);//
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackT, (hipfftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp2, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackT);
multi3Dgpu(d_StackE, d_StackE, d_StackT, sx, sy, sz);
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
hipFree(d_StackT); hipFree(d_StackESpectrum);
hipfftDestroy(fftPlanFwd);
hipfftDestroy(fftPlanInv);
hipMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_dualview_OTF2(float *d_decon, float *d_img1, float *h_img2, fComplex *h_OTF1, fComplex *h_OTF2, fComplex *h_OTF_bp1,
fComplex *h_OTF_bp2, long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** dual-view deconvolution with OTF interface when GPU memory is insufficient: 2 images + 2 fftPlans ***
// **** d_decon and d_img should be allocated with at least sx * sy * (sz / 2 + 1) * sizeof(fComplex) bytes, so they can also hold the image spectrum
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
// *****
size_t freeMem = 0, totalMem = 0;
hipfftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *h_StackA = NULL, *h_StackB = NULL, *h_StackE = NULL;
h_StackA = (float *)malloc(totalSize * sizeof(float));
h_StackB = (float *)malloc(totalSize * sizeof(float));
h_StackE = (float *)malloc(totalSize * sizeof(float));
float *d_StackA = d_img1, *d_StackE = d_decon;
float *d_StackB = NULL;
fComplex *d_OTF = NULL, *d_StackESpectrum = NULL;
d_StackESpectrum = (fComplex *)d_StackA;
d_OTF = (fComplex *)d_StackE;
cudaStatus = hipGetLastError();
// initialize estimation
hipMalloc((void **)&d_StackB, totalSize * sizeof(float));
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
hipMemcpy(d_StackB, h_img2, totalSize * sizeof(float), hipMemcpyHostToDevice);
maxvalue3Dgpu(d_StackB, d_StackB, (float)(SMALLVALUE), sx, sy, sz);
hipMemcpy(h_StackA, d_StackA, totalSize * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_StackB, d_StackB, totalSize * sizeof(float), hipMemcpyDeviceToHost);
if (flagConstInitial) { // use constant mean value as initial
float meanValue1 = (float)sum3Dgpu(d_StackA, sx, sy, sz);
float meanValue2 = (float)sum3Dgpu(d_StackB, sx, sy, sz);
hipMemset(d_StackE, 0, totalSize * sizeof(float));
addvaluegpu(d_StackE, d_StackE, (meanValue1 + meanValue2) / 2, sx, sy, sz);
}
else { // use measured images as initial
add3Dgpu(d_StackE, d_StackA, d_StackB, sx, sy, sz);
multivaluegpu(d_StackE, d_StackE, (float)0.5, sx, sy, sz);
}
hipMemcpy(h_StackE, d_StackE, totalSize * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_StackB); // release temperary variable
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
// Create FFT plans
hipfftPlan3d(&fftPlanFwd, sx, sy, sz, HIPFFT_R2C);
hipfftPlan3d(&fftPlanInv, sx, sy, sz, HIPFFT_C2R);
hipMemGetInfo(&freeMem, &totalMem);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
//printf("...Processing iteration %d\n", it);
// ### 1st view
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackE, (hipfftComplex *)d_StackESpectrum);
hipMemcpy(h_StackE, d_StackE, totalSize * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(d_OTF, h_OTF1, totalSizeSpectrum * sizeof(fComplex), hipMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackE);
hipMemcpy(d_StackA, h_StackA, totalSize * sizeof(float), hipMemcpyHostToDevice);
div3Dgpu(d_StackE, d_StackA, d_StackE, sx, sy, sz);
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackE, (hipfftComplex *)d_StackESpectrum);
hipMemcpy(d_OTF, h_OTF_bp1, totalSizeSpectrum * sizeof(fComplex), hipMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackE);
hipMemcpy(d_StackA, h_StackE, totalSize * sizeof(float), hipMemcpyHostToDevice);
multi3Dgpu(d_StackE, d_StackE, d_StackA, sx, sy, sz);//
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
// ### 2nd view
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackE, (hipfftComplex *)d_StackESpectrum);//
hipMemcpy(h_StackE, d_StackE, totalSize * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(d_OTF, h_OTF2, totalSizeSpectrum * sizeof(fComplex), hipMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackE);
hipMemcpy(d_StackA, h_StackB, totalSize * sizeof(float), hipMemcpyHostToDevice);
div3Dgpu(d_StackE, d_StackA, d_StackE, sx, sy, sz);//
hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_StackE, (hipfftComplex *)d_StackESpectrum);
hipMemcpy(d_OTF, h_OTF_bp2, totalSizeSpectrum * sizeof(fComplex), hipMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_StackESpectrum, (hipfftReal *)d_StackE);
hipMemcpy(d_StackA, h_StackE, totalSize * sizeof(float), hipMemcpyHostToDevice);
multi3Dgpu(d_StackE, d_StackE, d_StackA, sx, sy, sz);
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
free(h_StackA); free(h_StackB); free(h_StackE);
hipfftDestroy(fftPlanFwd);
hipfftDestroy(fftPlanInv);
hipMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
#undef SMALLVALUE
#undef NDIM
| 566dea324388f7ab66e7a948b5402362aefa3387.cu | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
// Includes CUDA
//#include <cuda.h>
#include <cuda_runtime.h>
//
#include <memory.h>
#include "device_launch_parameters.h"
#include <cufft.h>
//#include <cufftw.h> // ** cuFFT also comes with CPU-version FFTW, but seems not to work when image size is large.
#include "fftw3.h"
#include "cukernel.cuh"
extern "C" {
#include "powell.h"
}
#include "apifunc_internal.h"
#define SMALLVALUE 0.01
#define NDIM 12
cudaError_t cudaStatus;
#define cudaCheckErrors(msg) \
do { \
cudaStatus = cudaGetLastError(); \
if (cudaStatus != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(cudaStatus), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
extern "C"
bool isPow2(int x)
{
return ((x&(x - 1)) == 0);
};
//Round a / b to nearest higher integer value
inline long long int iDivUp(long long int a, long long int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
inline long long int iAlignUp(long long int a, long long int b)
{
return (a % b != 0) ? (a - a % b + b) : a;
}
int snapTransformSize(int dataSize)//
{
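// Pick an FFT-friendly padded size: round up to a multiple of 16; if that is a power of two,
// keep it; otherwise use the next power of two when it does not exceed 128, else round up
// to a multiple of 64.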
int hiBit;
unsigned int lowPOT, hiPOT;
dataSize = iAlignUp(dataSize, 16);
for (hiBit = 31; hiBit >= 0; hiBit--)
if (dataSize & (1U << hiBit))
{
break;
}
lowPOT = 1U << hiBit;
if (lowPOT == (unsigned int)dataSize)
{
return dataSize;
}
hiPOT = 1U << (hiBit + 1);
if (hiPOT <= 128)
{
return hiPOT;
}
else
{
return iAlignUp(dataSize, 64);
}
}
//////////////// Basic math functions /////////////////
// CPU functions
// sum
template <class T>
double sumcpu(T *h_idata, size_t totalSize) {
double sumValue = 0;
for (size_t i = 0; i < totalSize; i++) {
sumValue += (double)h_idata[i];
}
return sumValue;
}
template double sumcpu<int>(int *h_idata, size_t totalSize);
template double sumcpu<float>(float *h_idata, size_t totalSize);
template double sumcpu<double>(double *h_idata, size_t totalSize);
// add
template <class T>
void addcpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] + h_idata2[i];
}
template void addcpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void addcpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void addcpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
template <class T>
void addvaluecpu(T *h_odata, T *h_idata1, T h_idata2, size_t totalSize){
const T b = h_idata2;
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] + b;
}
template void addvaluecpu<int>(int *h_odata, int *h_idata1, int h_idata2, size_t totalSize);
template void addvaluecpu<float>(float *h_odata, float *h_idata1, float h_idata2, size_t totalSize);
template void addvaluecpu<double>(double *h_odata, double *h_idata1, double h_idata2, size_t totalSize);
// subtract
template <class T>
void subcpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] - h_idata2[i];
}
template void subcpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void subcpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void subcpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
// multiply
template <class T>
void multicpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] * h_idata2[i];
}
template void multicpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void multicpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void multicpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
//divide
template <class T>
void divcpu(T *h_odata, T *h_idata1, T *h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] / h_idata2[i];
}
template void divcpu<int>(int *h_odata, int *h_idata1, int *h_idata2, size_t totalSize);
template void divcpu<float>(float *h_odata, float *h_idata1, float *h_idata2, size_t totalSize);
template void divcpu<double>(double *h_odata, double *h_idata1, double *h_idata2, size_t totalSize);
template <class T>
void multivaluecpu(T *h_odata, T *h_idata1, T h_idata2, size_t totalSize){
for (size_t i = 0; i < totalSize; i++)
h_odata[i] = h_idata1[i] * h_idata2;
}
template void multivaluecpu<int>(int *h_odata, int *h_idata1, int h_idata2, size_t totalSize);
template void multivaluecpu<float>(float *h_odata, float *h_idata1, float h_idata2, size_t totalSize);
template void multivaluecpu<double>(double *h_odata, double *h_idata1, double h_idata2, size_t totalSize);
extern "C"
void multicomplexcpu(fComplex *h_odata, fComplex *h_idata1, fComplex *h_idata2, size_t totalSize){
fComplex a;
fComplex b;
for (size_t i = 0; i < totalSize; i++){
a = h_idata1[i];
b = h_idata2[i];
h_odata[i].x = a.x*b.x - a.y*b.y;
h_odata[i].y = a.x*b.y + a.y*b.x;
}
}
// max3Dcpu: find max value and coordinates
template <class T>
T max3Dcpu(size_t *corXYZ, T *h_idata, size_t sx, size_t sy, size_t sz) {
T peakValue = h_idata[0];
T t;
size_t sx0 = 0, sy0 = 0, sz0 = 0;
for (size_t i = 0; i < sx; i++) {
for (size_t j = 0; j < sy; j++) {
for (size_t k = 0; k < sz; k++) {
t = h_idata[i + j * sx + k * sx * sy];
if (peakValue < t) {
peakValue = t;
sx0 = i;
sy0 = j;
sz0 = k;
}
}
}
}
corXYZ[0] = sx0; corXYZ[1] = sy0; corXYZ[2] = sz0;
return peakValue;
}
template int max3Dcpu<int>(size_t *corXYZ, int *h_idata, size_t sx, size_t sy, size_t sz);
template float max3Dcpu<float>(size_t *corXYZ, float *h_idata, size_t sx, size_t sy, size_t sz);
template double max3Dcpu<double>(size_t *corXYZ, double *h_idata, size_t sx, size_t sy, size_t sz);
// max with a single value
template <class T>
void maxvaluecpu(T *h_odata, T *h_idata1, T h_idata2, size_t totalSize) {
T a;
const T b = h_idata2;
for (size_t i = 0; i < totalSize; i++) {
a = h_idata1[i];
h_odata[i] = (a > b) ? a : b;
}
}
template void maxvaluecpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t totalSize);
template void maxvaluecpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t totalSize);
template void maxvaluecpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t totalSize);
template <class T>
void changestorageordercpu(T *h_odata, T *h_idata, size_t sx, size_t sy, size_t sz, int orderMode) {
//orderMode
// 1: change tiff storage order to C storage order
//-1: change C storage order to tiff storage order
if (orderMode == 1) {
for (size_t i = 0; i < sx; i++) {
for (size_t j = 0; j < sy; j++) {
for (size_t k = 0; k < sz; k++) {
h_odata[i*sy*sz + j*sz + k] = h_idata[k*sy*sx + j*sx + i];
}
}
}
}
else if (orderMode == -1) {//change C storage order to tiff storage order:
for (size_t i = 0; i < sx; i++) {
for (size_t j = 0; j < sy; j++) {
for (size_t k = 0; k < sz; k++) {
h_odata[k*sy*sx + j*sx + i] = h_idata[i*sy*sz + j*sz + k];
}
}
}
}
}
template void changestorageordercpu<int>(int *h_odata, int *h_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordercpu<float>(float *h_odata, float *h_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordercpu<double>(double *h_odata, double *h_idata, size_t sx, size_t sy, size_t sz, int orderMode);
///// GPU functions
//add
template <class T>
void add3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
add3Dkernel<T> <<<grids, threads>>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
template void add3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void add3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void add3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
// add with a single value
template <class T>
void addvaluegpu(T *d_odata, T *d_idata1, T d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
addvaluekernel<T> <<<grids, threads >>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
template void addvaluegpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t sx, size_t sy, size_t sz);
template void addvaluegpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t sx, size_t sy, size_t sz);
template void addvaluegpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t sx, size_t sy, size_t sz);
//subtract
template <class T>
void sub3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
sub3Dkernel<T> <<<grids, threads>>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
template void sub3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void sub3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void sub3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
//multiply
template <class T>
void multi3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
multi3Dkernel<T> <<<grids, threads>>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
template void multi3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void multi3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void multi3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
// multiply with a single value
template <class T>
void multivaluegpu(T *d_odata, T *d_idata1, T d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
multivaluekernel<T> <<<grids, threads>>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
template void multivaluegpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t sx, size_t sy, size_t sz);
template void multivaluegpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t sx, size_t sy, size_t sz);
template void multivaluegpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t sx, size_t sy, size_t sz);
//multiply float complex
extern "C"
void multicomplex3Dgpu(fComplex *d_odata, fComplex *d_idata1, fComplex *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
multicomplex3Dkernel<<<grids, threads>>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
//multiply float complex and do normalization
extern "C"
void multicomplexnorm3Dgpu(fComplex *d_odata, fComplex *d_idata1, fComplex *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
multicomplexnorm3Dkernel <<<grids, threads>>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
//multiply double complex
extern "C"
void multidcomplex3Dgpu(dComplex *d_odata, dComplex *d_idata1, dComplex *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
multidcomplex3Dkernel<<<grids, threads >>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
//divide
template <class T>
void div3Dgpu(T *d_odata, T *d_idata1, T *d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
div3Dkernel<T> <<<grids, threads>>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
template void div3Dgpu<int>(int *d_odata, int *d_idata1, int *d_idata2, size_t sx, size_t sy, size_t sz);
template void div3Dgpu<float>(float *d_odata, float *d_idata1, float *d_idata2, size_t sx, size_t sy, size_t sz);
template void div3Dgpu<double>(double *d_odata, double *d_idata1, double *d_idata2, size_t sx, size_t sy, size_t sz);
//conjugation of complex
extern "C"
void conj3Dgpu(fComplex *d_odata, fComplex *d_idata, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
conj3Dkernel <<<grids, threads>>>(d_odata, d_idata, sx, sy, sz);
cudaThreadSynchronize();
}
// sumarization
// sumgpu 1: small data size
template <class T>
T sumgpu(T *d_idata, int totalSize){
int gridSize = iDivUp(totalSize, blockSize);
bool nIsPow2 = isPow2(totalSize);
int smemSize = (blockSize <= 32) ? 2 * blockSize * sizeof(T) : blockSize * sizeof(T);
T *h_temp = NULL, *d_temp = NULL;
h_temp = (T *)malloc(gridSize * sizeof(T));
cudaMalloc((void **)&d_temp, gridSize * sizeof(T));
sumgpukernel<T><<<gridSize, blockSize, smemSize>>>(
d_idata,
d_temp,
totalSize,
nIsPow2
);
cudaThreadSynchronize();
cudaMemcpy(h_temp, d_temp, gridSize * sizeof(T), cudaMemcpyDeviceToHost);
T sumValue = 0;
for (int i = 0; i < gridSize; i++){
sumValue += h_temp[i];
}
free(h_temp);
cudaFree(d_temp);
return sumValue;
}
template int sumgpu<int>(int *d_idata, int totalSize);
template float sumgpu<float>(float *d_idata, int totalSize);
template double sumgpu<double>(double *d_idata, int totalSize);
// sumgpu 2: huge data size (3D data)
template <class T>
double sum3Dgpu(T *d_idata, size_t sx, size_t sy, size_t sz){
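// Two-stage reduction for large volumes: the reduceZ kernel collapses the z dimension on the
// GPU (one double per (x, y) column), then the sx * sy partial sums are added on the host in
// double precision.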
size_t sxy = sx * sy;
double *h_temp = NULL, *d_temp = NULL;
h_temp = (double *)malloc(sxy * sizeof(double));
cudaMalloc((void **)&d_temp, sxy * sizeof(double));
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
reduceZ<T> <<<grids, threads >>>(d_idata, d_temp, sx, sy, sz);
cudaThreadSynchronize();
cudaMemcpy(h_temp, d_temp, sxy * sizeof(double), cudaMemcpyDeviceToHost);
double sumValue = 0;
for (size_t i = 0; i < sxy; i++)
sumValue += h_temp[i];
free(h_temp);
cudaFree(d_temp);
return sumValue;
}
template double sum3Dgpu<int>(int *d_idata, size_t sx, size_t sy, size_t sz);
template double sum3Dgpu<float>(float *d_idata, size_t sx, size_t sy, size_t sz);
template double sum3Dgpu<double>(double *d_idata, size_t sx, size_t sy, size_t sz);
// sumgpu 3: small data (1D data)
template <class T>
T sumgpu1D(T *d_idata, size_t totalSize){
const size_t r = 5; // a rough number, need further optimization
size_t tempSize = r * blockSize;
T *h_temp = NULL, *d_temp = NULL;
h_temp = (T *)malloc(tempSize * sizeof(T));
cudaMalloc((void **)&d_temp, tempSize * sizeof(T));
sumgpu1Dkernel<T> <<<r, blockSize >>>(
d_idata,
d_temp,
totalSize
);
cudaThreadSynchronize();
cudaMemcpy(h_temp, d_temp, tempSize * sizeof(T), cudaMemcpyDeviceToHost);
T sumValue = 0;
for (int i = 0; i < tempSize; i++){
sumValue += h_temp[i];
}
free(h_temp);
cudaFree(d_temp);
return sumValue;
}
template int sumgpu1D<int>(int *d_idata, size_t totalSize);
template float sumgpu1D<float>(float *d_idata, size_t totalSize);
template double sumgpu1D<double>(double *d_idata, size_t totalSize);
// max3Dgpu: find max value and coordinates
template <class T>
T max3Dgpu(size_t *corXYZ, T *d_idata, size_t sx, size_t sy, size_t sz){
size_t sx0 = 0, sy0 = 0, sz0 = 0;
T *d_temp1 = NULL, *h_temp1 = NULL;
size_t *d_temp2 = NULL, *h_temp2 = NULL;
cudaMalloc((void **)&d_temp1, sx*sy *sizeof(T));
cudaMalloc((void **)&d_temp2, sx*sy *sizeof(size_t));
h_temp1 = (T *)malloc(sx*sy * sizeof(T));
h_temp2 = (size_t *)malloc(sx*sy * sizeof(size_t));
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
maxZkernel<T> <<<grids, threads >>>(d_idata, d_temp1, d_temp2, sx, sy, sz);
cudaThreadSynchronize();
cudaMemcpy(h_temp1, d_temp1, sx*sy * sizeof(T), cudaMemcpyDeviceToHost);
cudaMemcpy(h_temp2, d_temp2, sx*sy * sizeof(size_t), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
T peakValue = h_temp1[0];
T t;
for (size_t i = 0; i < sx; i++){
for (size_t j = 0; j < sy; j++){
t = h_temp1[i + j * sx];
if (peakValue < t){
peakValue = t;
sx0 = i;
sy0 = j;
sz0 = h_temp2[i + j * sx];
}
}
}
corXYZ[0] = sx0; corXYZ[1] = sy0; corXYZ[2] = sz0;
free(h_temp1); free(h_temp2);
cudaFree(d_temp1); cudaFree(d_temp2);
return peakValue;
}
template int max3Dgpu<int>(size_t *corXYZ, int *d_idata, size_t sx, size_t sy, size_t sz);
template float max3Dgpu<float>(size_t *corXYZ, float *d_idata, size_t sx, size_t sy, size_t sz);
template double max3Dgpu<double>(size_t *corXYZ, double *d_idata, size_t sx, size_t sy, size_t sz);
// max with a single value
template <class T>
void maxvalue3Dgpu(T *d_odata, T *d_idata1, T d_idata2, size_t sx, size_t sy, size_t sz){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
maxvalue3Dgpukernel<T><<<grids, threads >>>(d_odata, d_idata1, d_idata2, sx, sy, sz);
cudaThreadSynchronize();
}
template void maxvalue3Dgpu<int>(int *d_odata, int *d_idata1, int d_idata2, size_t sx, size_t sy, size_t sz);
template void maxvalue3Dgpu<float>(float *d_odata, float *d_idata1, float d_idata2, size_t sx, size_t sy, size_t sz);
template void maxvalue3Dgpu<double>(double *d_odata, double *d_idata1, double d_idata2, size_t sx, size_t sy, size_t sz);
// maximum projection
template <class T>
void maxprojection(T *d_odata, T *d_idata, size_t sx, size_t sy, size_t sz, int pDirection){
size_t psx, psy, psz;
if (pDirection == 1){
psx = sx; psy = sy; psz = sz;
}
if (pDirection == 2){
psx = sz; psy = sx; psz = sy;
}
if (pDirection == 3){
psx = sy; psy = sz; psz = sx;
}
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(psx, threads.x), iDivUp(psy, threads.y));
maxprojectionkernel<T> <<<grids, threads >>>(d_odata, d_idata, sx, sy, sz, psx, psy, psz, pDirection);
cudaThreadSynchronize();
}
template void maxprojection<int>(int *d_odata, int *d_idata, size_t sx, size_t sy, size_t sz, int pDirection);
template void maxprojection<float>(float *d_odata, float *d_idata, size_t sx, size_t sy, size_t sz, int pDirection);
template void maxprojection<double>(double *d_odata, double *d_idata, size_t sx, size_t sy, size_t sz, int pDirection);
//Other functions
template <class T>
void changestorageordergpu(T *d_odata, T *d_idata, size_t sx, size_t sy, size_t sz, int orderMode){
//orderMode
// 1: change tiff storage order to C storage order
//-1: change C storage order to tiff storage order
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
changestorageordergpukernel<T><<<grids, threads>>>(d_odata, d_idata, sx, sy, sz, orderMode);
cudaThreadSynchronize();
}
template void changestorageordergpu<int>(int *d_odata, int *d_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordergpu<float>(float *d_odata, float *d_idata, size_t sx, size_t sy, size_t sz, int orderMode);
template void changestorageordergpu<double>(double *d_odata, double *d_idata, size_t sx, size_t sy, size_t sz, int orderMode);
// rotate 90/-90 degree by axis
template <class T>
void rotbyyaxis(T *d_odata, T *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection){
//rot direction
// 1: rotate 90 deg around Y axis
//-1: rotate -90 deg around Y axis
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
rotbyyaxiskernel<T> <<<grids, threads >>>(d_odata, d_idata, sx, sy, sz, rotDirection);
cudaThreadSynchronize();
}
template void rotbyyaxis<int>(int *d_odata, int *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection);
template void rotbyyaxis<float>(float *d_odata, float *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection);
template void rotbyyaxis<double>(double *d_odata, double *d_idata, size_t sx, size_t sy, size_t sz, int rotDirection);
/*
// rotate any degree by y axis: matrix for affine transformation
void rot3Dbyyaxis(float *d_odata, float theta, int sx, int sz, int sx2, int sz2){
// Rotation matrix:translation (-sx2/2, -sz2/2) --> rotation--> translation back(sx/2,sy/2)
// 1 0 0 sx / 2 cos(theta) 0 sin(theta) 0 1 0 0 -sx2/2
// 0 1 0 0 * 0 1 0 0 * 0 1 0 0
// 0 0 1 sz / 2 -sin(theta) 0 cos(theta) 0 0 0 1 -sz2/2
// 0 0 0 1 0 0 0 1 0 0 0 1
d_odata[0] = cos(theta); d_odata[1] = 0; d_odata[2] = sin(theta);
d_odata[3] = sx / 2 - sx2 / 2 * cos(theta) - sz2 / 2 * sin(theta);
d_odata[4] = 0; d_odata[5] = 1; d_odata[6] = 0; d_odata[7] = 0;
d_odata[8] = -sin(theta); d_odata[9] = 0; d_odata[10] = cos(theta);
d_odata[11] = sz / 2 + sx2 / 2 * sin(theta) - sz2 / 2 * cos(theta);
}
*/
void p2matrix(float *m, float *x){
m[0] = x[4], m[1] = x[5], m[2] = x[6], m[3] = x[1];
m[4] = x[7], m[5] = x[8], m[6] = x[9], m[7] = x[2];
m[8] = x[10], m[9] = x[11], m[10] = x[12], m[11] = x[3];
/*
m[0] = x[1], m[1] = x[2], m[2] = x[3], m[3] = x[4];
m[4] = x[5], m[5] = x[6], m[6] = x[7], m[7] = x[8];
m[8] = x[9], m[9] = x[10], m[10] = x[11], m[11] = x[12];
*/
}
void matrix2p(float *m, float *x){
x[0] = 0;
x[1] = m[3], x[2] = m[7], x[3] = m[11], x[4] = m[0];
x[5] = m[1], x[6] = m[2], x[7] = m[4], x[8] = m[5];
x[9] = m[6], x[10] = m[8], x[11] = m[9], x[12] = m[10];
/*
x[1] = m[0], x[2] = m[1], x[3] = m[2], x[4] = m[3];
x[5] = m[4], x[6] = m[5], x[7] = m[6], x[8] = m[7];
x[9] = m[8], x[10] = m[9], x[11] = m[10], x[12] = m[11];
*/
}
extern "C" void matrixmultiply(float * m, float *m1, float *m2){//for transformation matrix calcution only
m[0] = m1[0] * m2[0] + m1[1] * m2[4] + m1[2] * m2[8];
m[1] = m1[0] * m2[1] + m1[1] * m2[5] + m1[2] * m2[9];
m[2] = m1[0] * m2[2] + m1[1] * m2[6] + m1[2] * m2[10];
m[3] = m1[0] * m2[3] + m1[1] * m2[7] + m1[2] * m2[11] + m1[3];
m[4] = m1[4] * m2[0] + m1[5] * m2[4] + m1[6] * m2[8];
m[5] = m1[4] * m2[1] + m1[5] * m2[5] + m1[6] * m2[9];
m[6] = m1[4] * m2[2] + m1[5] * m2[6] + m1[6] * m2[10];
m[7] = m1[4] * m2[3] + m1[5] * m2[7] + m1[6] * m2[11] + m1[7];
m[8] = m1[8] * m2[0] + m1[9] * m2[4] + m1[10] * m2[8];
m[9] = m1[8] * m2[1] + m1[9] * m2[5] + m1[10] * m2[9];
m[10] = m1[8] * m2[2] + m1[9] * m2[6] + m1[10] * m2[10];
m[11] = m1[8] * m2[3] + m1[9] * m2[7] + m1[10] * m2[11] + m1[11];
//**** 12 13 14 15 never change ****
//no need to calculate m[12,13,14,15]:0 0 0 1
/*
m[0] = m1[0] * m2[0] + m1[1] * m2[4] + m1[2] * m2[8] + m1[3] * m2[12];
m[1] = m1[0] * m2[1] + m1[1] * m2[5] + m1[2] * m2[9] + m1[3] * m2[13];
m[2] = m1[0] * m2[2] + m1[1] * m2[6] + m1[2] * m2[10] + m1[3] * m2[14];
m[3] = m1[0] * m2[3] + m1[1] * m2[7] + m1[2] * m2[11] + m1[3] * m2[15];
m[4] = m1[4] * m2[0] + m1[5] * m2[4] + m1[6] * m2[8] + m1[7] * m2[12];
m[5] = m1[4] * m2[1] + m1[5] * m2[5] + m1[6] * m2[9] + m1[7] * m2[13];
m[6] = m1[4] * m2[2] + m1[5] * m2[6] + m1[6] * m2[10] + m1[7] * m2[14];
m[7] = m1[4] * m2[3] + m1[5] * m2[7] + m1[6] * m2[11] + m1[7] * m2[15];
m[8] = m1[8] * m2[0] + m1[9] * m2[4] + m1[10] * m2[8] + m1[11] * m2[12];
m[9] = m1[8] * m2[1] + m1[9] * m2[5] + m1[10] * m2[9] + m1[11] * m2[13];
m[10] = m1[8] * m2[2] + m1[9] * m2[6] + m1[10] * m2[10] + m1[11] * m2[14];
m[11] = m1[8] * m2[3] + m1[9] * m2[7] + m1[10] * m2[11] + m1[11] * m2[15];
m[12] = m1[12] * m2[0] + m1[13] * m2[4] + m1[14] * m2[8] + m1[15] * m2[12];
m[13] = m1[12] * m2[1] + m1[13] * m2[5] + m1[14] * m2[9] + m1[15] * m2[13];
m[14] = m1[12] * m2[2] + m1[13] * m2[6] + m1[14] * m2[10] + m1[15] * m2[14];
m[15] = m1[12] * m2[3] + m1[13] * m2[7] + m1[14] * m2[11] + m1[15] * m2[15];
*/
}
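/*
Illustrative sketch (not part of the original code): composing two transforms with matrixmultiply().
The matrices are 3x4 affine matrices stored row-major in the first 12 floats (the implicit bottom
row is 0 0 0 1); only indices 0-11 are read or written, so 12-element arrays suffice even though the
library usually allocates 16 floats. m = m1 * m2 applies m2 first when mapping a point. Variable
names below are hypothetical.

	float shiftX[12] = { 1, 0, 0, 5,   0, 1, 0, 0,   0, 0, 1, 0 };	// translate +5 along x
	float scale2[12] = { 2, 0, 0, 0,   0, 2, 0, 0,   0, 0, 2, 0 };	// isotropic scaling by 2
	float m[12];
	matrixmultiply(m, shiftX, scale2);	// scale first, then shift: m[0] == 2, m[3] == 5
*/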
extern "C" void rot2matrix(float * p_out, float theta, long long int sx, long long int sy, long long int sz, int rotAxis){
//p_out: 12 elements
//theta: rotation angle
//sx, sy, sz: images size
////rotAxis
// 1: rotate theta around X axis
// 2: rotate theta around Y axis
// 3: rotate theta around Z axis
long long int sNew;
float *p_temp, *p_temp1, *p_temp2, *p_temp3;
p_temp = (float *)malloc(16 * sizeof(float));
p_temp1 = (float *)malloc(16 * sizeof(float));
p_temp2 = (float *)malloc(16 * sizeof(float));
p_temp3 = (float *)malloc(16 * sizeof(float));
for (int i = 0; i < 15; i++){
p_temp[i] = p_temp1[i] = p_temp2[i] = p_temp3[i] = 0;
}
p_temp[15] = p_temp1[15] = p_temp2[15] = p_temp3[15] = 1; //**** 12 13 14 15 never change ****
// matrix: p_temp1 * p_temp2 * p_temp3
if (rotAxis == 1){//Rotate by x axis
		// Rotation matrix: translation (0, -sy2/2, -sz2/2) --> rotation --> translation back (0, sy/2, sz/2)
		// 1 0 0 0          1  0          0          0      1 0 0 0
		// 0 1 0 sy / 2  *  0  cos(theta) sin(theta) 0   *  0 1 0 -sy2/2
		// 0 0 1 sz / 2     0 -sin(theta) cos(theta) 0      0 0 1 -sz2/2
		// 0 0 0 1          0  0          0          1      0 0 0 1
p_temp1[0] = p_temp1[5] = p_temp1[10] = 1;
p_temp1[7] = sy / 2; p_temp1[11] = sz / 2;
p_temp2[0] = 1; p_temp2[1] = 0; p_temp2[2] = 0; p_temp2[3] = 0;
p_temp2[4] = 0; p_temp2[5] = cos(theta); p_temp2[6] = sin(theta); p_temp2[7] = 0;
p_temp2[8] = 0; p_temp2[9] = -sin(theta); p_temp2[10] = cos(theta); p_temp2[11] = 0;
sNew = round(sqrt(sy * sy + sz*sz));
p_temp3[0] = p_temp3[5] = p_temp3[10] = 1;
p_temp3[7] = - sNew / 2; p_temp3[11] = - sNew / 2;
}
if (rotAxis == 2){//Rotate by y axis
// Rotation matrix:translation (-sx2/2, 0, -sz2/2) --> rotation--> translation back(sx/2,0,sz/2)
// 1 0 0 sx / 2 cos(theta) 0 -sin(theta) 0 1 0 0 -sx2/2
// 0 1 0 0 * 0 1 0 0 * 0 1 0 0
// 0 0 1 sz / 2 sin(theta) 0 cos(theta) 0 0 0 1 -sz2/2
// 0 0 0 1 0 0 0 1 0 0 0 1
p_temp1[0] = p_temp1[5] = p_temp1[10] = 1;
p_temp1[3] = sx / 2; p_temp1[11] = sz / 2;
p_temp2[0] = cos(theta); p_temp2[1] = 0; p_temp2[2] = -sin(theta); p_temp2[3] = 0;
p_temp2[4] = 0; p_temp2[5] = 1; p_temp2[6] = 0; p_temp2[7] = 0;
p_temp2[8] = sin(theta); p_temp2[9] = 0; p_temp2[10] = cos(theta); p_temp2[11] = 0;
sNew = round(sqrt(sx * sx + sz*sz));
p_temp3[0] = p_temp3[5] = p_temp3[10] = 1;
p_temp3[3] = -sNew / 2; p_temp3[11] = -sNew / 2;
}
if (rotAxis == 3){//Rotate by z axis
// Rotation matrix:translation (-sx2/2,-sy2/2, 0) --> rotation--> translation back(sx/2,sy/2,0)
// 1 0 0 sx / 2 cos(theta) sin(theta) 0 0 1 0 0 -sx2/2
// 0 1 0 sy / 2 * -sin(theta) cos(theta) 0 0 * 0 1 0 -sy2/2
// 0 0 1 0 0 0 1 0 0 0 1 0
// 0 0 0 1 0 0 0 1 0 0 0 1
p_temp1[0] = p_temp1[5] = p_temp1[10] = 1;
p_temp1[3] = sx / 2; p_temp1[7] = sy / 2;
p_temp2[0] = cos(theta); p_temp2[1] = sin(theta); p_temp2[2] = 0; p_temp2[3] = 0;
p_temp2[4] = -sin(theta); p_temp2[5] = cos(theta); p_temp2[6] = 0; p_temp2[7] = 0;
p_temp2[8] = 0; p_temp2[9] = 0; p_temp2[10] = 1; p_temp2[11] = 0;
sNew = round(sqrt(sx * sx + sy*sy));
p_temp3[0] = p_temp3[5] = p_temp3[10] = 1;
p_temp3[3] = -sNew / 2; p_temp3[7] = -sNew / 2;
}
matrixmultiply(p_temp, p_temp1, p_temp2);
matrixmultiply(p_out, p_temp, p_temp3);
free(p_temp);
free(p_temp1);
free(p_temp2);
free(p_temp3);
}
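/*
Illustrative sketch (not part of the original code): building a 90-degree rotation about the Y axis
with rot2matrix(). The returned 12-element matrix maps destination coordinates back to source
coordinates, so it can be passed as iTmx to the affine-transform routines in this file. For
rotAxis == 2 the destination x and z sizes are assumed to be sNew = round(sqrt(sx*sx + sz*sz)),
matching the sNew used inside rot2matrix(). Variable names are hypothetical.

	float tmx[16];
	float theta = (float)(0.5 * 3.14159265358979);	// 90 degrees in radians
	rot2matrix(tmx, theta, sx, sy, sz, 2);			// rotAxis 2: rotate about the Y axis
	long long int sNew = (long long int)round(sqrt((double)(sx * sx + sz * sz)));
	// e.g. affinetrans3d1(d_rot, tmx, d_img, sNew, sy, sNew, sx, sy, sz);
*/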
extern "C" void dof9tomatrix(float * p_out, float *p_dof, int dofNum){
//p_out: 12 elements
	//p_dof: 10 elements: 0 x y z alpha beta theta a b c
//dofNum: 3, 6, 7 or 9
float *p_temp1, *p_temp2, *p_temp3;
p_temp1 = (float *)malloc(16 * sizeof(float));
p_temp2 = (float *)malloc(16 * sizeof(float));
p_temp3 = (float *)malloc(16 * sizeof(float));
for (int i = 0; i < 15; i++){
p_temp1[i] = p_temp2[i] = p_temp3[i] = 0;
}
p_temp1[15] = p_temp2[15] = p_temp3[15] = 1; //**** 12 13 14 15 never change ****
float x, y, z, alpha, beta, theta, a, b, c;
if (dofNum == 3){//translation
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = 0;
beta = 0;
theta = 0;
a = 1;
b = 1;
c = 1;
}
else if (dofNum == 6){//rigid body: translation, rotation
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = p_dof[4] / 57.3;
beta = p_dof[5] / 57.3;
theta = p_dof[6] / 57.3;
a = 1;
b = 1;
c = 1;
}
	else if (dofNum == 7){//translation, rotation, scale equally in 3 dimensions
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = p_dof[4] / 57.3;
beta = p_dof[5] / 57.3;
theta = p_dof[6] / 57.3;
a = p_dof[7];
b = p_dof[7];
c = p_dof[7];
}
else if (dofNum == 9){//translation,rotation,scale
x = p_dof[1];
y = p_dof[2];
z = p_dof[3];
alpha = p_dof[4] / 57.3;
beta = p_dof[5] / 57.3;
theta = p_dof[6] / 57.3;
a = p_dof[7];
b = p_dof[8];
c = p_dof[9];
}
//translation
// 1 0 0 x
// 0 1 0 y
// 0 0 1 z
// 0 0 0 1
p_temp2[3] = x;
p_temp2[7] = y;
p_temp2[11] = z;
// scaling
// a 0 0 0
// 0 b 0 0
// 0 0 c 0
// 0 0 0 1
p_temp2[0] = a;
p_temp2[5] = b;
p_temp2[10] = c;
// rotating by Z axis
// cos(alpha) sin(alpha) 0 0
// -sin(alpha) cos(alpha) 0 0
// 0 0 1 0
// 0 0 0 1
p_temp3[0] = cos(alpha); p_temp3[1] = sin(alpha); p_temp3[2] = 0; p_temp3[3] = 0;
p_temp3[4] = -sin(alpha); p_temp3[5] = cos(alpha); p_temp3[6] = 0; p_temp3[7] = 0;
p_temp3[8] = 0; p_temp3[9] = 0; p_temp3[10] = 1; p_temp3[11] = 0;
//p_temp3[15] = 1;
matrixmultiply(p_temp1, p_temp2, p_temp3);
// rotating by X axis
// 1 0 0 0
// 0 cos(beta) sin(beta) 0
// 0 -sin(beta) cos(beta) 0
// 0 0 0 1
p_temp3[0] = 1; p_temp3[1] = 0; p_temp3[2] = 0; p_temp3[3] = 0;
p_temp3[4] = 0; p_temp3[5] = cos(beta); p_temp3[6] = sin(beta); p_temp3[7] = 0;
p_temp3[8] = 0; p_temp3[9] = -sin(beta); p_temp3[10] = cos(beta); p_temp3[11] = 0;
//p_temp3[15] = 1;
matrixmultiply(p_temp2, p_temp1, p_temp3);
// rotating by Y axis
// cos(theta) 0 -sin(theta) 0
// 0 1 0 0
// sin(theta) 0 cos(theta) 0
// 0 0 0 1
p_temp3[0] = cos(theta); p_temp3[1] = 0; p_temp3[2] = -sin(theta); p_temp3[3] = 0;
p_temp3[4] = 0; p_temp3[5] = 1; p_temp3[6] = 0; p_temp3[7] = 0;
p_temp3[8] = sin(theta); p_temp3[9] = 0; p_temp3[10] = cos(theta); p_temp3[11] = 0;
//p_temp3[15] = 1;
matrixmultiply(p_out, p_temp2, p_temp3);
free(p_temp1);
free(p_temp2);
free(p_temp3);
}
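/*
Illustrative sketch (not part of the original code): converting a degree-of-freedom vector into a
3x4 affine matrix with dof9tomatrix(). Note the 1-based layout of p_dof (element 0 is unused) and
that the angles are supplied in degrees (they are divided by 57.3 ~ 180/pi inside the function).
Variable names are hypothetical.

	float tmx[16];
	float dof[10] = { 0,   2.0f, -1.5f, 0.0f,   5.0f, 0.0f, 0.0f,   1.0f, 1.0f, 1.0f };
	//              unused  x      y     z      alpha  beta  theta   a     b     c
	dof9tomatrix(tmx, dof, 6);	// dofNum 6: rigid body (translation + rotation), scales forced to 1
*/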
template <class T>
void circshiftgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz){
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
circshiftgpukernel<T> <<<grids, threads >>>(d_odata, d_idata, sx, sy, sz, dx, dy, dz);
cudaThreadSynchronize();
}
template void circshiftgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void circshiftgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void circshiftgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template <class T>
void imshiftgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz) {
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
imshiftgpukernel<T> << <grids, threads >> >(d_odata, d_idata, sx, sy, sz, dx, dy, dz);
cudaThreadSynchronize();
}
template void imshiftgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void imshiftgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
template void imshiftgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int dx, long long int dy, long long int dz);
extern "C" void CopyTranMatrix(float *x, int dataSize){
cudaMemcpyToSymbol(d_aff, x, dataSize, 0, cudaMemcpyHostToDevice);
}
template <class T>
void cudacopyhosttoarray(cudaArray *d_Array, cudaChannelFormatDesc channelDesc, T *h_idata, size_t sx, size_t sy, size_t sz){
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)h_idata, sx*sizeof(T), sx, sy);
copyParams.dstArray = d_Array;
copyParams.extent = make_cudaExtent(sx, sy, sz);
copyParams.kind = cudaMemcpyHostToDevice;
	cudaMemcpy3D(&copyParams);
cudaThreadSynchronize();
}
template void
cudacopyhosttoarray<unsigned short>(cudaArray *d_Array, cudaChannelFormatDesc channelDesc, unsigned short *h_idata, size_t sx, size_t sy, size_t sz);
template void
cudacopyhosttoarray<float>(cudaArray *d_Array, cudaChannelFormatDesc channelDesc, float *h_idata, size_t sx, size_t sy, size_t sz);
template <class T>
void cudacopydevicetoarray(cudaArray *d_Array, cudaChannelFormatDesc channelDesc, T *d_idata, size_t sx, size_t sy, size_t sz){
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)d_idata, sx*sizeof(T), sx, sy);
copyParams.dstArray = d_Array;
copyParams.extent = make_cudaExtent(sx, sy, sz);
copyParams.kind = cudaMemcpyDeviceToDevice;
	cudaMemcpy3D(&copyParams);
cudaThreadSynchronize();
}
template void
cudacopydevicetoarray<unsigned short>(cudaArray *d_Array, cudaChannelFormatDesc channelDesc, unsigned short *d_idata, size_t sx, size_t sy, size_t sz);
template void
cudacopydevicetoarray<float>(cudaArray *d_Array, cudaChannelFormatDesc channelDesc, float *d_idata, size_t sx, size_t sy, size_t sz);
extern "C" void BindTexture(cudaArray *d_Array, cudaChannelFormatDesc channelDesc){
// set texture parameters
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.addressMode[2] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
	tex.normalized = false; // use unnormalized texture coordinates
// Bind the array to the texture
cudaBindTextureToArray(tex, d_Array, channelDesc);
cudaThreadSynchronize();
}
extern "C" void BindTexture2(cudaArray *d_Array, cudaChannelFormatDesc channelDesc) {
// set texture parameters
	tex2.addressMode[0] = cudaAddressModeWrap;
	tex2.addressMode[1] = cudaAddressModeWrap;
	tex2.addressMode[2] = cudaAddressModeWrap;
	tex2.filterMode = cudaFilterModeLinear;
	tex2.normalized = false; // use unnormalized texture coordinates
// Bind the array to the texture
cudaBindTextureToArray(tex2, d_Array, channelDesc);
cudaThreadSynchronize();
}
extern "C" void BindTexture16(cudaArray *d_Array, cudaChannelFormatDesc channelDesc){
// set texture parameters
	tex16.addressMode[0] = cudaAddressModeWrap;
	tex16.addressMode[1] = cudaAddressModeWrap;
	tex16.addressMode[2] = cudaAddressModeWrap;
	tex16.filterMode = cudaFilterModeLinear;
	tex16.normalized = false; // use unnormalized texture coordinates
// Bind the array to the texture
cudaBindTextureToArray(tex16, d_Array, channelDesc);
cudaThreadSynchronize();
}
extern "C" void UnbindTexture(){
cudaUnbindTexture(tex);
cudaThreadSynchronize();
}
extern "C" void UnbindTexture2() {
cudaUnbindTexture(tex2);
cudaThreadSynchronize();
}
extern "C" void UnbindTexture16(){
cudaUnbindTexture(tex16);
cudaThreadSynchronize();
}
extern "C" void AccessTexture(float x, float y,float z){
dim3 threads(2, 2, 2);
accesstexturekernel <<<1, threads >>>(x, y, z);
cudaThreadSynchronize();
}
template <class T>
void affineTransform(T *d_s, long long int sx, long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2){
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grid(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
affinetransformkernel<T><<<grid, threads >>>(d_s, sx, sy, sz, sx2, sy2, sz2);
cudaThreadSynchronize();
}
template void
affineTransform<unsigned short>(unsigned short *d_s, long long int sx, long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2);
template void
affineTransform<float>(float *d_s, long long int sx, long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2);
float corrfunc(float *d_t, float sd_t, float *aff, long long int sx,
long long int sy, long long int sz, long long int sx2, long long int sy2, long long int sz2){
// temp bufs
long long int sxy = sx * sy;
double *d_temp1 = NULL, *d_temp2 = NULL;
cudaMalloc((void **)&d_temp1, sxy * sizeof(double));
cudaMalloc((void **)&d_temp2, sxy * sizeof(double));
//copy aff to GPU const
cudaMemcpyToSymbol(d_aff, aff, 12 * sizeof(float), 0, cudaMemcpyHostToDevice);// copy host affine matrix to device const
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
corrkernel<<<grids, threads>>>( d_t, // the source image is texture, trans matrix is const
d_temp1, d_temp2, sx, sy, sz, sx2, sy2, sz2);
cudaThreadSynchronize();
double sqrSum = 0, corrSum = 0;
if (sxy > 100000){ // if count more than 100000, use gpu to perform sum
sqrSum = sumgpu1D(d_temp1, sxy);
corrSum = sumgpu1D(d_temp2, sxy);
}
else{
double *h_temp = NULL;
h_temp = (double *)malloc(sx*sy * sizeof(double));
cudaMemcpy(h_temp, d_temp1, sxy * sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < sxy; i++)
sqrSum += h_temp[i];
cudaMemcpy(h_temp, d_temp2, sxy * sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < sxy; i++)
corrSum += h_temp[i];
free(h_temp);
}
cudaFree(d_temp1);
cudaFree(d_temp2);
if (sqrt(sqrSum) == 0) return -2.0;
return (float)(corrSum / sqrt(sqrSum)) / sd_t;
}
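/*
Note: corrfunc() evaluates the GPU registration cost. The moving image bound to the 3D texture is
resampled through the affine matrix aff, and the function returns
	sum(s * t) / (sqrt(sum(t * t)) * sd_t),
where s is the static image d_t, t is the interpolated moving image, and sd_t = sqrt(sum(s * s)) is
precomputed by the caller. When both inputs have been mean-subtracted beforehand (as the
registration routines do), this is the zero-normalized cross-correlation; -2.0 flags the degenerate
case sum(t * t) == 0.
*/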
extern "C" void BindTexture2D(cudaArray *d_Array, cudaChannelFormatDesc channelDesc){
// set texture parameters
tex2D1.addressMode[0] = cudaAddressModeWrap;
tex2D1.addressMode[1] = cudaAddressModeWrap;
tex2D1.filterMode = cudaFilterModeLinear;
	tex2D1.normalized = false; // use unnormalized texture coordinates
// Bind the array to the texture
cudaBindTextureToArray(tex2D1, d_Array, channelDesc);
}
extern "C" void UnbindTexture2D(
){
cudaUnbindTexture(tex2D1);
}
extern "C"
void affineTransform2D(float *d_t, int sx, int sy, int sx2, int sy2){
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
affineTransform2Dkernel <<<grids, threads >>>(d_t, sx, sy, sx2, sy2);
cudaThreadSynchronize();
}
float corrfunc2D(float *d_t, float sd_t, float *aff, long long int sx, long long int sy, long long int sx2, long long int sy2){
//copy aff to GPU const
cudaMemcpyToSymbol(d_aff, aff, 6 * sizeof(float), 0, cudaMemcpyHostToDevice);// copy host affine matrix to device const
long long int totalSize = sx*sy;
float *d_sqr = NULL, *d_corr = NULL, *h_temp = NULL;
cudaMalloc((void **)&d_sqr, totalSize * sizeof(float));
cudaMalloc((void **)&d_corr, totalSize * sizeof(float));
h_temp = (float *)malloc(totalSize * sizeof(float));
dim3 threads(blockSize2Dx, blockSize2Dy, 1);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y));
corr2Dkernel <<<grids, threads >>>( // the other image is texture, trans matrix is const
d_t, d_sqr, d_corr, sx, sy, sx2, sy2);
cudaThreadSynchronize();
cudaMemcpy(h_temp, d_corr, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
double corrSum = sumcpu(h_temp, totalSize);
cudaMemcpy(h_temp, d_sqr, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
double sqrSum = sumcpu(h_temp, totalSize);
cudaFree(d_sqr);
cudaFree(d_corr);
free(h_temp);
if (sqrt(sqrSum) == 0) return -2.0;
return float(corrSum / sqrt(sqrSum))/sd_t;
}
///// CPU interpolation
float lerp(float x, float x1, float x2, float q00, float q01) {
return ((x2 - x) / (x2 - x1)) * q01 + ((x - x1) / (x2 - x1)) * q00;
}
float bilerp(float x, float y, float x1, float x2, float y1, float y2, float q11, float q12, float q21, float q22) {
float r1 = lerp(x, x1, x2, q11, q12);
float r2 = lerp(x, x1, x2, q21, q22);
return lerp(y, y1, y2, r1, r2);
}
float trilerp(float x, float y, float z, float x1, float x2, float y1, float y2, float z1, float z2,
float q111, float q112, float q121, float q122, float q211, float q212, float q221, float q222) {
float r1 = bilerp(x, y, x1, x2, y1, y2, q111, q112, q121, q122);
float r2 = bilerp(x, y, x1, x2, y1, y2, q211, q212, q221, q222);
return lerp(z, z1, z2, r1, r2);
}
float ilerp(float x, float x1, float x2, float q00, float q01) {
return (x2 - x) * q00 + (x - x1) * q01;
}
float ibilerp(float x, float y, float x1, float x2, float y1, float y2, float q11, float q12, float q21, float q22) {
float r1 = ilerp(x, x1, x2, q11, q12);
float r2 = ilerp(x, x1, x2, q21, q22);
return ilerp(y, y1, y2, r1, r2);
}
float itrilerp(float x, float y, float z, float x1, float x2, float y1, float y2, float z1, float z2,
float q111, float q112, float q121, float q122, float q211, float q212, float q221, float q222) {
float r1 = ibilerp(x, y, x1, x2, y1, y2, q111, q112, q121, q122);
float r2 = ibilerp(x, y, x1, x2, y1, y2, q211, q212, q221, q222);
return ilerp(z, z1, z2, r1, r2);
}
float ilerp2(float dx1, float dx2, float q00, float q01) {
return dx2 * q00 + dx1 * q01;
}
float ibilerp2(float dx1, float dx2, float dy1, float dy2, float q11, float q12, float q21, float q22) {
float r1 = ilerp2(dx1, dx2, q11, q12);
float r2 = ilerp2(dx1, dx2, q21, q22);
return ilerp2(dy1, dy2, r1, r2);
}
float itrilerp2(float dx1, float dx2, float dy1, float dy2, float dz1, float dz2,
float q111, float q112, float q121, float q122, float q211, float q212, float q221, float q222) {
float r1 = ibilerp2(dx1, dx2, dy1, dy2, q111, q112, q121, q122);
float r2 = ibilerp2(dx1, dx2, dy1, dy2, q211, q212, q221, q222);
return ilerp2(dz1, dz2, r1, r2);
}
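/*
Note: itrilerp() and itrilerp2() compute the same trilinear interpolation for unit-spaced corner
coordinates; itrilerp2() just takes the precomputed fractional distances dx1 = x - x1 and
dx2 = 1 - dx1 (likewise for y and z), which is what the CPU correlation/transform loops below use
to avoid recomputing them per corner. Illustrative check with hypothetical corner values q111..q222:

	float a = itrilerp(0.25f, 0.5f, 0.75f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f,
		q111, q112, q121, q122, q211, q212, q221, q222);
	float b = itrilerp2(0.25f, 0.75f, 0.5f, 0.5f, 0.75f, 0.25f,
		q111, q112, q121, q122, q211, q212, q221, q222);
	// a == b up to floating-point rounding
*/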
//output[sz-k-1][j][i] = input[i][j][k]
//d_odata[(sz - k - 1)*sx*sy + j*sx + i] = d_idata[i*sy*sz + j*sz + k];
double corrfunccpu(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
double sqrSum = 0, corrSum = 0;
int x1, y1, z1, x2, y2, z2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int sxy = sx*sy, sxy2 = sx2*sy2;
for (int i = 0; i < sx; i++){
for (int j = 0; j < sy; j++){
for (int k = 0; k < sz; k++){
float ix = (float)i;
float iy = (float)j;
float iz = (float)k;
float tx = aff[0] * ix + aff[1] * iy + aff[2] * iz + aff[3];
float ty = aff[4] * ix + aff[5] * iy + aff[6] * iz + aff[7];
float tz = aff[8] * ix + aff[9] * iy + aff[10] * iz + aff[11];
x1 = floor(tx); y1 = floor(ty); z1 = floor(tz);
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
if ((x1 >= 0) && (y1 >= 0) && (z1 >= 0) && (x2 < sx2) && (y2 < sy2) && (z2 < sz2)){
// [k*sy*sx + j*sx + i]
q1 = h_t[z1*sxy2 + y1*sx2 + x1];
q2 = h_t[z1*sxy2 + y1*sx2 + x2];
q3 = h_t[z1*sxy2 + y2*sx2 + x1];
q4 = h_t[z1*sxy2 + y2*sx2 + x2];
q5 = h_t[z2*sxy2 + y1*sx2 + x1];
q6 = h_t[z2*sxy2 + y1*sx2 + x2];
q7 = h_t[z2*sxy2 + y2*sx2 + x1];
q8 = h_t[z2*sxy2 + y2*sx2 + x2];
t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
}
else
t = 0;
s = h_s[k*sxy + j*sx + i];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
double corrfunccpu3(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
const float r0 = aff[0], r1 = aff[1], r2 = aff[2], r3 = aff[3], r4 = aff[4], r5= aff[5],
r6 = aff[6], r7 = aff[7], r8 = aff[8], r9 = aff[9], r10 = aff[10], r11 = aff[11];
double sqrSum = 0, corrSum = 0;
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int syz = sy*sz, syz2 = sy2*sz2, x1syz2, x2syz2, y1sz2, y2sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = r0 * ix + r1 * iy + r2 * iz + r3;
ty = r4 * ix + r5 * iy + r6 * iz + r7;
tz = r8 * ix + r9 * iy + r10 * iz + r11;
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
x1syz2 = x1*syz2;
x2syz2 = x2*syz2;
y1sz2 = y1*sz2;
y2sz2 = y2*sz2;
q1 = h_t[x1syz2 + y1sz2 + z1];
q2 = h_t[x2syz2 + y1sz2 + z1];
q3 = h_t[x1syz2 + y2sz2 + z1];
q4 = h_t[x2syz2 + y2sz2 + z1];
q5 = h_t[x1syz2 + y1sz2 + z2];
q6 = h_t[x2syz2 + y1sz2 + z2];
q7 = h_t[x1syz2 + y2sz2 + z2];
q8 = h_t[x2syz2 + y2sz2 + z2];
//t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
t = dz2*(dy2*dx2*q1 + dy2*dx1*q2 + dy1*dx2*q3 + dy1*dx1*q4) + dz1*(dy2*dx2*q5 + dy2*dx1*q6 + dy1*dx2*q7 + dy1*dx1*q8);
//t = 1;
}
else
t = 0;
s = h_s[i*syz + j*sz + k];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
double corrfunccpu2_old(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
const float r0 = aff[0], r1 = aff[1], r2 = aff[2], r3 = aff[3], r4 = aff[4], r5 = aff[5],
r6 = aff[6], r7 = aff[7], r8 = aff[8], r9 = aff[9], r10 = aff[10], r11 = aff[11];
double sqrSum = 0, corrSum = 0;
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int sxy = sx*sy, sxy2 = sx2*sy2, z1sxy2, z2sxy2, y1sx2, y2sx2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = r0 * ix + r1 * iy + r2 * iz + r3;
ty = r4 * ix + r5 * iy + r6 * iz + r7;
tz = r8 * ix + r9 * iy + r10 * iz + r11;
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
					// [k*sx2*sy2 + j*sx2 + i]
z1sxy2 = z1*sxy2;
z2sxy2 = z2*sxy2;
y1sx2 = y1*sx2;
y2sx2 = y2*sx2;
q1 = h_t[z1sxy2 + y1sx2 + x1];
q2 = h_t[z1sxy2 + y1sx2 + x2];
q3 = h_t[z1sxy2 + y2sx2 + x1];
q4 = h_t[z1sxy2 + y2sx2 + x2];
q5 = h_t[z2sxy2 + y1sx2 + x1];
q6 = h_t[z2sxy2 + y1sx2 + x2];
q7 = h_t[z2sxy2 + y2sx2 + x1];
q8 = h_t[z2sxy2 + y2sx2 + x2];
//t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
t = dz2*(dy2*dx2*q1 + dy2*dx1*q2 + dy1*dx2*q3 + dy1*dx1*q4) + dz1*(dy2*dx2*q5 + dy2*dx1*q6 + dy1*dx2*q7 + dy1*dx1*q8);
//t = 1;
}
else
t = 0;
s = h_s[k*sxy + j*sx + i];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
void affinetransformcpu_old(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float t;
int sxy = sx*sy, sxy2 = sx2*sy2, z1sxy2, z2sxy2, y1sx2, y2sx2;
int syz = sy*sz, syz2 = sy2*sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = aff[0] * ix + aff[1] * iy + aff[2] * iz + aff[3];
ty = aff[4] * ix + aff[5] * iy + aff[6] * iz + aff[7];
tz = aff[8] * ix + aff[9] * iy + aff[10] * iz + aff[11];
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
					// [k*sx2*sy2 + j*sx2 + i]
z1sxy2 = z1*sxy2;
z2sxy2 = z2*sxy2;
y1sx2 = y1*sx2;
y2sx2 = y2*sx2;
q1 = h_t[z1sxy2 + y1sx2 + x1];
q2 = h_t[z1sxy2 + y1sx2 + x2];
q3 = h_t[z1sxy2 + y2sx2 + x1];
q4 = h_t[z1sxy2 + y2sx2 + x2];
q5 = h_t[z2sxy2 + y1sx2 + x1];
q6 = h_t[z2sxy2 + y1sx2 + x2];
q7 = h_t[z2sxy2 + y2sx2 + x1];
q8 = h_t[z2sxy2 + y2sx2 + x2];
t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = itrilerp(tx, ty, tz, x1, x2, y1, y2, z1, z2, q1, q2, q3, q4, q5, q6, q7, q8);
//t = dz2*(dy2*dx2*q1 + dy2*dx1*q2 + dy1*dx2*q3 + dy1*dx1*q4) + dz1*(dy2*dx2*q5 + dy2*dx1*q6 + dy1*dx2*q7 + dy1*dx1*q8);
}
else
t = 0;
				h_s[k*sxy + j*sx + i] = t;
}
}
}
}
double corrfunccpu2(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
const float r0 = aff[0], r1 = aff[1], r2 = aff[2], r3 = aff[3], r4 = aff[4], r5 = aff[5],
r6 = aff[6], r7 = aff[7], r8 = aff[8], r9 = aff[9], r10 = aff[10], r11 = aff[11];
double sqrSum = 0, corrSum = 0;
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float s, t;
int syz = sy*sz, syz2 = sy2*sz2, x1syz2, x2syz2, y1sz2, y2sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = r0 * ix + r1 * iy + r2 * iz + r3;
ty = r4 * ix + r5 * iy + r6 * iz + r7;
tz = r8 * ix + r9 * iy + r10 * iz + r11;
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
x1syz2 = x1*syz2;
x2syz2 = x2*syz2;
y1sz2 = y1*sz2;
y2sz2 = y2*sz2;
q1 = h_t[x1syz2 + y1sz2 + z1];
q2 = h_t[x2syz2 + y1sz2 + z1];
q3 = h_t[x1syz2 + y2sz2 + z1];
q4 = h_t[x2syz2 + y2sz2 + z1];
q5 = h_t[x1syz2 + y1sz2 + z2];
q6 = h_t[x2syz2 + y1sz2 + z2];
q7 = h_t[x1syz2 + y2sz2 + z2];
q8 = h_t[x2syz2 + y2sz2 + z2];
t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
}
else
t = 0;
s = h_s[i*syz + j*sz + k];
sqrSum += (double)t*t;
corrSum += (double)s*t;
}
}
}
return (corrSum / sqrt(sqrSum));
}
void affinetransformcpu(float *h_s,
float *h_t,// source stack
float *aff,
int sx,
int sy,
int sz,
int sx2,
int sy2,
int sz2
){
float ix, iy, iz, tx, ty, tz;
int x1, y1, z1, x2, y2, z2;
float dx1, dy1, dz1, dx2, dy2, dz2;
float q1, q2, q3, q4, q5, q6, q7, q8;
float t;
int syz = sy*sz, syz2 = sy2*sz2, x1syz2, x2syz2, y1sz2, y2sz2;
for (int i = 0; i < sx; i++){
ix = (float)i;
for (int j = 0; j < sy; j++){
iy = (float)j;
for (int k = 0; k < sz; k++){
iz = (float)k;
tx = aff[0] * ix + aff[1] * iy + aff[2] * iz + aff[3];
ty = aff[4] * ix + aff[5] * iy + aff[6] * iz + aff[7];
tz = aff[8] * ix + aff[9] * iy + aff[10] * iz + aff[11];
x1 = (int)tx; y1 = (int)ty; z1 = (int)tz;
x2 = x1 + 1; y2 = y1 + 1; z2 = z1 + 1;
dx1 = tx - (float)x1; dy1 = ty - (float)y1; dz1 = tz - (float)z1;
dx2 = 1 - dx1; dy2 = 1 - dy1; dz2 = 1 - dz1;
if (x1 >= 0 && y1 >= 0 && z1 >= 0 && x2 < sx2 && y2 < sy2 && z2 < sz2){
// [i*sy*sz + j*sz + k]
x1syz2 = x1*syz2;
x2syz2 = x2*syz2;
y1sz2 = y1*sz2;
y2sz2 = y2*sz2;
q1 = h_t[x1syz2 + y1sz2 + z1];
q2 = h_t[x2syz2 + y1sz2 + z1];
q3 = h_t[x1syz2 + y2sz2 + z1];
q4 = h_t[x2syz2 + y2sz2 + z1];
q5 = h_t[x1syz2 + y1sz2 + z2];
q6 = h_t[x2syz2 + y1sz2 + z2];
q7 = h_t[x1syz2 + y2sz2 + z2];
q8 = h_t[x2syz2 + y2sz2 + z2];
t = itrilerp2(dx1, dx2, dy1, dy2, dz1, dz2, q1, q2, q3, q4, q5, q6, q7, q8);
}
else
t = 0;
h_s[i*syz + j*sz + k] = t;
}
}
}
}
// CPU
template <class T>
void flipcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz) {
for (long long int i = 0; i < sx; i++) {
for (long long int j = 0; j < sy; j++) {
for (long long int k = 0; k < sz; k++) {
//d_odata[k*sy*sx + j*sx + i] = d_idata[(sz - k - 1) *sy*sx + (sy - j - 1)*sx + (sx - i - 1)];
h_odata[i*sy*sz + j*sz + k] = h_idata[(sx - i - 1) *sy*sz + (sy - j - 1)*sz + (sz - k - 1)];
}
}
}
}
template void flipcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz);
template void flipcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz);
template void flipcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz);
template <class T>
void padPSFcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2){
long long int sox, soy, soz;
sox = sx2 / 2; soy = sy2 / 2; soz = sz2 / 2;
long long int dx, dy, dz;
for (long long int x = 0; x < sx; x++) {
for (long long int y = 0; y < sy; y++) {
for (long long int z = 0; z < sz; z++) {
dx = x - sox; dy = y - soy; dz = z - soz;
if (dx < 0) dx += sx;
if (dy < 0) dy += sy;
if (dz < 0) dz += sz;
//d_PaddedPSF[dz][dy][dx] = d_PSF[z][y][x]
if (dx >= 0 && dx < sx && dy >= 0 && dy < sy && dz >= 0 && dz < sz) {
//d_odata[dz*sy*sx + dy*sx + dx] = d_idata[z*sy2*sx2 + y*sx2 + x];
h_odata[dx*sy*sz + dy*sz + dz] = h_idata[x*sy2*sz2 + y*sz2 + z];
}
}
}
}
}
template void
padPSFcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void padstackcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2){
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
long long int x, y, z;
for (long long int dx = 0; dx < sx; dx++) {
for (long long int dy = 0; dy < sy; dy++) {
for (long long int dz = 0; dz < sz; dz++) {
if (dx < sox) {
x = 0;
}
if (dy < soy) {
y = 0;
}
if (dz < soz) {
z = 0;
}
if (dx >= sox && dx < (sox + sx2)) {
x = dx - sox;
}
if (dy >= soy && dy < (soy + sy2)) {
y = dy - soy;
}
if (dz >= soz && dz < (soz + sz2)) {
z = dz - soz;
}
if (dx >= (sox + sx2)) {
x = sx2 - 1;
}
if (dy >= (soy + sy2)) {
y = sy2 - 1;
}
if (dz >= (soz + sz2)) {
z = sz2 - 1;
}
//d_odata[dz*sy*sx + dy*sx + dx] = d_idata[z*sy2*sx2 + y*sx2 + x];
h_odata[dx*sy*sz + dy*sz + dz] = h_idata[x*sy2*sz2 + y*sz2 + z];
}
}
}
}
template void
padstackcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2){
long long int sox, soy, soz;
sox = (sx2 - sx) / 2;
soy = (sy2 - sy) / 2;
soz = (sz2 - sz) / 2;
long long int dx, dy, dz;
for (long long int x = 0; x < sx; x++) {
for (long long int y = 0; y < sy; y++) {
for (long long int z = 0; z < sz; z++) {
dx = sox + x; dy = soy + y; dz = soz + z;
//d_odata[z*sy*sx + y*sx + x] = d_idata[dz*sy2*sx2 + dy*sx2 + dx];
h_odata[x*sy*sz + y*sz + z] = h_idata[dx*sy2*sz2 + dy*sz2 + dz];
}
}
}
}
template void
cropcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropcpu2(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz) {
long long int dx, dy, dz;
for (long long int x = 0; x < sx; x++) {
for (long long int y = 0; y < sy; y++) {
for (long long int z = 0; z < sz; z++) {
dx = sox + x; dy = soy + y; dz = soz + z;
h_odata[z*sy*sx + y*sx + x] = h_idata[dz*sy2*sx2 + dy*sx2 + dx];
//h_odata[x*sy*sz + y*sz + z] = h_idata[dx*sy2*sz2 + dy*sz2 + dz];
}
}
}
}
template void
cropcpu2<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropcpu2<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropcpu2<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template <class T>
void alignsize3Dcpu(T *h_odata, T *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
long long int x, y, z;
for (long long int dx = 0; dx < sx; dx++) {
for (long long int dy = 0; dy < sy; dy++) {
for (long long int dz = 0; dz < sz; dz++) {
x = dx - sox;
y = dy - soy;
z = dz - soz;
if ((x < 0) || (y < 0) || (z < 0) || (x >= sx2) || (y >= sy2) || (z >= sz2))
h_odata[dx*sy*sz + dy*sz + dz] = 0;
else
h_odata[dx*sy*sz + dy*sz + dz] = h_idata[x*sy2*sz2 + y*sz2 + z];
}
}
}
}
template void alignsize3Dcpu<int>(int *h_odata, int *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dcpu<float>(float *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dcpu<double>(double *h_odata, double *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
extern "C"
void genOTFcpu(fftwf_complex *h_odata, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, bool normFlag) {
long long int totalSizeIn = sx2 * sy2 * sz2;
long long int totalSizeOut = sx * sy * sz;
long long int totalSizeMax = (totalSizeIn > totalSizeOut) ? totalSizeIn : totalSizeOut;
float *h_temp = (float *)malloc(totalSizeMax * sizeof(float));
if (normFlag) {
double sumValue = sumcpu(h_idata, sx2 * sy2 * sz2);
multivaluecpu(h_temp, h_idata, (float)(1 / sumValue), sx2 * sy2 * sz2);
}
else
memcpy(h_temp, h_idata, totalSizeIn * sizeof(float));
if((sx<sx2)||(sy<sy2)||(sz<sz2)){
alignsize3Dcpu((float *)h_odata, h_temp, sx, sy, sz, sx2, sy2, sz2);
padPSFcpu(h_temp, (float *)h_odata, sx, sy, sz, sx, sy, sz);
}
else {
padPSFcpu((float *)h_odata, h_temp, sx, sy, sz, sx2, sy2, sz2);
memcpy(h_temp, h_odata, totalSizeOut * sizeof(float));
}
fftwf_plan image2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_temp, h_odata, FFTW_MEASURE);
fftwf_execute(image2Spectrum);
free(h_temp);
fftwf_destroy_plan(image2Spectrum);
}
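/*
Illustrative sketch (not part of the original code): generating a normalized OTF from a PSF volume
on the CPU with genOTFcpu(). The output buffer must hold sx*sy*(sz/2+1) complex values to match the
r2c layout produced by fftwf_plan_dft_r2c_3d(sx, sy, sz, ...). h_PSF and the sizes below are
hypothetical.

	long long int sx = 128, sy = 128, sz = 64;		// padded OTF size
	long long int sx2 = 64, sy2 = 64, sz2 = 32;		// raw PSF size
	fftwf_complex *h_OTF = (fftwf_complex *)malloc(sx * sy * (sz / 2 + 1) * sizeof(fftwf_complex));
	genOTFcpu(h_OTF, h_PSF, sx, sy, sz, sx2, sy2, sz2, true);	// true: normalize the PSF to unit sum
	free(h_OTF);
*/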
// GPU
template <class T>
void flipgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz) {
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, blockSize3Dx), iDivUp(sy, blockSize3Dy), iDivUp(sz, blockSize3Dz));
flipgpukernel<T> << <grids, threads >> >(d_odata, d_idata, sx, sy, sz);
cudaThreadSynchronize();
}
template void flipgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz);
template void flipgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz);
template void flipgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz);
template <class T>
void padPSFgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = sx2 / 2; soy = sy2 / 2; soz = sz2 / 2;
cudaMemset(d_odata, 0, sx*sy*sz * sizeof(T));
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx2, threads.x), iDivUp(sy2, threads.y), iDivUp(sz2, threads.z));
padPSFgpukernel<T> << <grids, threads >> >(d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
cudaThreadSynchronize();
}
template void
padPSFgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padPSFgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void padstackgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
padstackgpukernel<T> << < grids, threads >> > (d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
cudaThreadSynchronize();
}
template void
padstackgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
padstackgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = (sx2 - sx) / 2;
soy = (sy2 - sy) / 2;
soz = (sz2 - sz) / 2;
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
cropgpukernel<T> <<< grids, threads >>> (d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
cudaThreadSynchronize();
}
template void
cropgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void
cropgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template <class T>
void cropgpu2(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz) {
assert(d_odata != d_idata);
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sz, threads.x), iDivUp(sy, threads.y), iDivUp(sx, threads.z));
cropgpukernel<T> <<< grids, threads >>> (d_odata, d_idata, sz, sy, sx, sz2, sy2, sx2, soz, soy, sox);
cudaThreadSynchronize();
}
template void
cropgpu2<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropgpu2<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template void
cropgpu2<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, long long int sox, long long int soy, long long int soz);
template <class T>
void alignsize3Dgpu(T *d_odata, T *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
assert(d_odata != d_idata);
long long int sox, soy, soz;
sox = (sx - sx2) / 2;
soy = (sy - sy2) / 2;
soz = (sz - sz2) / 2;
dim3 threads(blockSize3Dx, blockSize3Dy, blockSize3Dz);
dim3 grids(iDivUp(sx, threads.x), iDivUp(sy, threads.y), iDivUp(sz, threads.z));
alignsize3Dgpukernel<T> << < grids, threads >> > (d_odata, d_idata, sx, sy, sz, sx2, sy2, sz2, sox, soy, soz);
cudaThreadSynchronize();
}
template void alignsize3Dgpu<int>(int *d_odata, int *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dgpu<float>(float *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
template void alignsize3Dgpu<double>(double *d_odata, double *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2);
// Registration variables: 2D
static float *d_img2D = NULL;
static float *h_aff2D;
static long long int imx2D1, imy2D1, imx2D2, imy2D2;
static float valueStatic2D;
static int itNumStatic2D;
// Registration variables: 3D
static float *d_imgStatic = NULL;
static float valueStatic;
static long long int sxStatic1, syStatic1, szStatic1, sxStatic2, syStatic2, szStatic2;
static float *affCoef;
static int itNumStatic, dofNum;
static bool dof9Flag;
static float *h_s3D = NULL, *h_t3D = NULL;
float costfunc2D(float *x) {
h_aff2D[0] = x[1], h_aff2D[1] = x[2], h_aff2D[2] = x[3];
h_aff2D[3] = x[4], h_aff2D[4] = x[5], h_aff2D[5] = x[6];
float costValue = corrfunc2D(d_img2D, valueStatic2D, h_aff2D, imx2D1, imy2D1, imx2D2, imy2D2);
itNumStatic2D += 1;
return -costValue;
}
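/*
Note: costfunc2D() is the scalar objective handed to the powell() optimizer. The 1-based parameter
vector x[1..6] is copied into the 2x3 affine matrix
	[ h_aff2D[0]  h_aff2D[1]  h_aff2D[2] ]
	[ h_aff2D[3]  h_aff2D[4]  h_aff2D[5] ]
(the 2D kernels are assumed to interpret this row-wise, i.e. tx = a0*ix + a1*iy + a2 and
ty = a3*ix + a4*iy + a5), and the negative ZNCC from corrfunc2D() is returned so that minimizing
the cost maximizes the correlation.
*/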
extern "C"
int affinetrans2d0(float *h_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sx2, long long int sy2) {
return 0;
}
extern "C"
// bug in affinetrans2d1
int affinetrans2d1(float *h_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sx2, long long int sy2) {
	// total pixel count for each image
	long long int totalSize1 = sx*sy;
	long long int totalSize2 = sx2*sy2;
float *d_imgTemp = NULL;
cudaMalloc((void **)&d_imgTemp, totalSize1 * sizeof(float));
cudaCheckErrors("****Memory allocating fails... GPU out of memory !!!!*****\n");
cudaChannelFormatDesc channelDesc2D =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *d_Array2D;
cudaMallocArray(&d_Array2D, &channelDesc2D, sx2, sy2);
cudaCheckErrors("****Memory array allocating fails... GPU out of memory !!!!*****\n");
CopyTranMatrix(iTmx, 6 * sizeof(float));
cudaMemcpyToArray(d_Array2D, 0, 0, h_idata, totalSize2 * sizeof(float), cudaMemcpyHostToDevice);
BindTexture2D(d_Array2D, channelDesc2D);
affineTransform2D(d_imgTemp, sx, sy, sx2, sy2);
	UnbindTexture2D();
cudaMemcpy(h_odata, d_imgTemp, totalSize1 * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_imgTemp);
cudaFreeArray(d_Array2D);
return 0;
}
extern "C"
int reg2d_phasor0(long long int *shiftXY, float *h_img1, float *h_img2, long long int sx, long long int sy) {
return 0;
}
extern "C"
int reg2d_phasor1(long long int *shiftXY, float *d_img1, float *d_img2, long long int sx, long long int sy) {
int totalSize = sx * sy;
int totalSizeSpectrum = sy*(sx / 2 + 1); // in complex floating format
fComplex *d_Spectrum1 = NULL, *d_Spectrum2 = NULL;
cudaMalloc((void **)&d_Spectrum1, totalSizeSpectrum * sizeof(fComplex));
cudaMalloc((void **)&d_Spectrum2, totalSizeSpectrum * sizeof(fComplex));
cufftHandle
fftPlanFwd,
fftPlanInv;
cufftPlan2d(&fftPlanFwd, sy, sx, CUFFT_R2C);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_img1, (cufftComplex *)d_Spectrum2);
conj3Dgpu(d_Spectrum1, d_Spectrum2, sy, (sx / 2 + 1), 1);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_img2, (cufftComplex *)d_Spectrum2);
// multiplication and normalization
multicomplexnorm3Dgpu(d_Spectrum2, d_Spectrum1, d_Spectrum2, sy, (sx / 2 + 1), 1);
cufftDestroy(fftPlanFwd);
cufftPlan2d(&fftPlanInv, sy, sx, CUFFT_C2R);
float *d_phasor1 = (float *)d_Spectrum1;
cufftExecC2R(fftPlanInv, (cufftComplex *)d_Spectrum2, (cufftReal *)d_phasor1);
cufftDestroy(fftPlanInv);
size_t corXYZ[3];
float *d_phasor2 = (float *)d_Spectrum2;
circshiftgpu(d_phasor2, d_phasor1, sx, sy, 1, round(sx / 2), round(sy / 2), 0);
float peakValue = max3Dgpu(&corXYZ[0], d_phasor2, sx, sy, 1);
shiftXY[0] = corXYZ[0] - sx / 2;
shiftXY[1] = corXYZ[1] - sy / 2;
cudaFree(d_Spectrum1);
cudaFree(d_Spectrum2);
// compare 4 cases based on cross-correlation
long long int shiftX = shiftXY[0];
long long int shiftY = shiftXY[1];
long long int xabs = abs(shiftX), yabs = abs(shiftY);
		long long int beta = 4; // threshold: re-examine the shift only if it exceeds 1/beta of the image size
if ((xabs >(sx / beta)) || (yabs >(sy / beta))) {
float *d_imgT = NULL, *d_crop1 = NULL, *d_crop2 = NULL;
long long int sizex1, sizex2, sizey1, sizey2, sizez1, sizez2, sizex, sizey, sizez, sizeMaxCrop;
sizeMaxCrop = totalSize;
cudaMalloc((void **)&d_imgT, totalSize * sizeof(float));
cudaMalloc((void **)&d_crop1, sizeMaxCrop * sizeof(float));
cudaMalloc((void **)&d_crop2, sizeMaxCrop * sizeof(float));
circshiftgpu(d_imgT, d_img2, sx, sy, 1, -shiftX, -shiftY, 0);
		// encode the 4 cases as a for loop
long long int imSizeCropx[2], imSizeCropy[2], imSizeCropz[2];
long long int imox[2], imoy[2], imoz[2];
// index 0 records original shifts, index 1 switches the shift to the opposite case.
imSizeCropx[0] = sx - xabs; imSizeCropx[1] = xabs;
if (shiftX > 0) {
imox[0] = 0; imox[1] = sx - xabs;
}
else {
imox[0] = xabs; imox[1] = 0;
}
imSizeCropy[0] = sy - yabs; imSizeCropy[1] = yabs;
if (shiftY > 0) {
imoy[0] = 0; imoy[1] = sy - yabs;
}
else {
imoy[0] = yabs; imoy[1] = 0;
}
int indx = 0, indy = 0;
float ccMax = -3, ccNow = 0;
for (int i = 0; i < 2; i++) {
if (imSizeCropx[i] >(sx / beta)) {
for (int j = 0; j < 2; j++) {
if (imSizeCropy[j] >(sy / beta)) {
cropgpu2(d_crop1, d_img1, imSizeCropx[i], imSizeCropy[j], 1, sx, sy, 1, imox[i], imoy[j], 0);
cropgpu2(d_crop2, d_imgT, imSizeCropx[i], imSizeCropy[j], 1, sx, sy, 1, imox[i], imoy[j], 0);
ccNow = zncc1(d_crop1, d_crop2, imSizeCropx[i], imSizeCropy[j], 1);
if (ccMax < ccNow) {
ccMax = ccNow;
indx = i;
indy = j;
}
}
}
}
}
// if ind ==1, flip the coordinates
if (indx == 1) {
if (shiftX > 0)
shiftXY[0] = shiftX - sx;
else
shiftXY[0] = shiftX + sx;
}
if (indy == 1) {
if (shiftY > 0)
shiftXY[1] = shiftY - sy;
else
shiftXY[1] = shiftY + sy;
}
cudaFree(d_imgT);
cudaFree(d_crop1);
cudaFree(d_crop2);
}
return 0;
}
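/*
Illustrative sketch (not part of the original code): translational registration of two same-sized
2D images already resident on the GPU with reg2d_phasor1(). The estimated shift of d_img2 relative
to d_img1 is returned in shiftXY. d_img1, d_img2, sx and sy are hypothetical.

	long long int shiftXY[2] = { 0, 0 };
	reg2d_phasor1(shiftXY, d_img1, d_img2, sx, sy);
	printf("phasor shift: %lld, %lld\n", shiftXY[0], shiftXY[1]);
*/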
extern "C"
int reg2d_affine0(float *h_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy,
long long int sx2, long long int sy2, int affMethod, bool flagTmx, float FTOL, int itLimit, float *regRecords) {
// **** CPU affine registration for 2D images ***
return 0;
}
extern "C"
int reg2d_affine1(float *h_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy,
long long int sx2, long long int sy2, int affMethod, bool flagTmx, float FTOL, int itLimit, float *records) {
// **** GPU affine registration for 2D images ***
/*
*** flagTmx:
true : use iTmx as input matrix;
false: default;
*** records: 8 element array
	[1] -[3]: initial ZNCC (zero-normalized cross-correlation, negative of the cost function), intermediate ZNCC, optimized ZNCC;
	[4] -[7]: single sub iteration time (in ms), total number of sub iterations, iteration time (in s), whole registration time (in s);
*/
imx2D1 = sx; imy2D1 = sy;
imx2D2 = sx2; imy2D2 = sy2;
// total pixel count for each images
long long int totalSize1 = imx2D1*imy2D1;
long long int totalSize2 = imx2D2*imy2D2;
long long int totalSizeMax = (totalSize1 > totalSize2) ? totalSize1 : totalSize2;
// ****************** Processing Starts***************** //
// variables for memory and time cost records
clock_t start, end, ctime1, ctime2, ctime3;
start = clock();
int iter;
float fret;
int DIM2D = 6;
h_aff2D = (float *)malloc(DIM2D * sizeof(float));
	float *p2D = (float *)malloc((DIM2D + 1) * sizeof(float));
float **xi2D;
xi2D = matrix(1, DIM2D, 1, DIM2D);
float *h_imgT = (float *)malloc(totalSizeMax * sizeof(float));
cudaMalloc((void **)&d_img2D, totalSize1 * sizeof(float));
cudaCheckErrors("****Memory allocating fails... GPU out of memory !!!!*****\n");
cudaChannelFormatDesc channelDesc2D =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *d_Array2D;
cudaMallocArray(&d_Array2D, &channelDesc2D, imx2D2, imy2D2);
cudaCheckErrors("****Memory array allocating fails... GPU out of memory !!!!*****\n");
if (flagTmx) {
memcpy(h_aff2D, iTmx, DIM2D * sizeof(float));
}
else {
h_aff2D[0] = 1, h_aff2D[1] = 0, h_aff2D[2] = (imx2D2 - imx2D1) / 2;
h_aff2D[3] = 0, h_aff2D[4] = 1, h_aff2D[5] = (imy2D2 - imy2D1) / 2;
}
p2D[0] = 0;
p2D[1] = h_aff2D[0], p2D[2] = h_aff2D[1], p2D[3] = h_aff2D[2];
p2D[4] = h_aff2D[3], p2D[5] = h_aff2D[4], p2D[6] = h_aff2D[5];
for (int i = 1; i <= DIM2D; i++)
for (int j = 1; j <= DIM2D; j++)
xi2D[i][j] = (i == j ? 1.0 : 0.0);
float meanValue = (float)sumcpu(h_img1, totalSize1) / totalSize1;
addvaluecpu(h_imgT, h_img1, -meanValue, totalSize1);
multicpu(h_reg, h_imgT, h_imgT, totalSize1);
double sumSqrA = sumcpu(h_reg, totalSize1);
valueStatic2D = float(sqrt(sumSqrA));
if (valueStatic2D == 0) {
fprintf(stderr, "*** SD of image 1 is zero, empty image input **** \n");
exit(1);
}
cudaMemcpy(d_img2D, h_imgT, totalSize1 * sizeof(float), cudaMemcpyHostToDevice);
meanValue = (float)sumcpu(h_img2, totalSize2) / totalSize2;
addvaluecpu(h_imgT, h_img2, -meanValue, totalSize2);
cudaMemcpyToArray(d_Array2D, 0, 0, h_imgT, totalSize2 * sizeof(float), cudaMemcpyHostToDevice);
BindTexture2D(d_Array2D, channelDesc2D);
cudaCheckErrors("****Fail to bind 2D texture!!!!*****\n");
itNumStatic2D = 0;
ctime1 = clock();
records[1] = -costfunc2D(p2D);
ctime2 = clock();
if (affMethod > 0) {
powell(p2D, xi2D, DIM2D, FTOL, &iter, &fret, costfunc2D, &itNumStatic2D, itLimit);
memcpy(iTmx, h_aff2D, DIM2D * sizeof(float));
}
	UnbindTexture2D();
ctime3 = clock();
cudaMemcpyToArray(d_Array2D, 0, 0, h_img2, totalSize2 * sizeof(float), cudaMemcpyHostToDevice);
BindTexture2D(d_Array2D, channelDesc2D);
affineTransform2D(d_img2D, imx2D1, imy2D1, imx2D2, imy2D2);
	UnbindTexture2D();
cudaMemcpy(h_reg, d_img2D, totalSize1 * sizeof(float), cudaMemcpyDeviceToHost);
records[3] = -fret;
records[4] = (float)(ctime2 - ctime1);
records[5] = itNumStatic2D;
records[6] = (float)(ctime3 - ctime2) / CLOCKS_PER_SEC;
free(p2D);
free(h_aff2D);
free_matrix(xi2D, 1, DIM2D, 1, DIM2D);
free(h_imgT);
cudaFree(d_img2D);
cudaFreeArray(d_Array2D);
end = clock();
records[7] = (float)(end - start) / CLOCKS_PER_SEC;
return 0;
}
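/*
Illustrative sketch (not part of the original code): 2D affine registration with reg2d_affine1().
iTmx provides the initial matrix when flagTmx is true and receives the optimized matrix; records
must hold at least 8 floats (see the comment at the top of the function). The images, sizes, FTOL
and iteration limit below are hypothetical.

	float iTmx[6] = { 1, 0, 0,   0, 1, 0 };		// identity initial guess (used only if flagTmx == true)
	float records[8] = { 0 };
	reg2d_affine1(h_reg, iTmx, h_img1, h_img2, sx, sy, sx2, sy2,
		1, false, 0.0001f, 3000, records);		// affMethod > 0: run the powell optimization
*/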
extern "C"
int affinetrans3d0(float *h_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
// cpu
return 0;
}
extern "C"
int affinetrans3d1(float *d_odata, float *iTmx, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaArray *d_ArrayTemp;
cudaMalloc3DArray(&d_ArrayTemp, &channelDesc, make_cudaExtent(sx2, sy2, sz2));
cudaThreadSynchronize();
cudaCheckErrors("****GPU array memory allocating fails... GPU out of memory !!!!*****\n");
cudacopydevicetoarray(d_ArrayTemp, channelDesc, d_idata, sx2, sy2, sz2);
BindTexture(d_ArrayTemp, channelDesc);
CopyTranMatrix(iTmx, NDIM * sizeof(float));
affineTransform(d_odata, sx, sy, sz, sx2, sy2, sz2);
UnbindTexture();
cudaFreeArray(d_ArrayTemp);
return 0;
}
extern "C"
int affinetrans3d2(float *d_odata, float *iTmx, float *h_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2) {
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaArray *d_ArrayTemp;
cudaMalloc3DArray(&d_ArrayTemp, &channelDesc, make_cudaExtent(sx2, sy2, sz2));
cudaThreadSynchronize();
cudaCheckErrors("****GPU array memory allocating fails... GPU out of memory !!!!*****\n");
cudacopyhosttoarray(d_ArrayTemp, channelDesc, h_idata, sx2, sy2, sz2);
BindTexture(d_ArrayTemp, channelDesc);
CopyTranMatrix(iTmx, NDIM * sizeof(float));
affineTransform(d_odata, sx, sy, sz, sx2, sy2, sz2);
UnbindTexture();
cudaFreeArray(d_ArrayTemp);
return 0;
}
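/*
Illustrative sketch (not part of the original code): applying a 3D affine transform on the GPU.
affinetrans3d1() expects both buffers on the device and a 12-element matrix mapping output
coordinates to input coordinates (e.g. from rot2matrix() or dof9tomatrix()); affinetrans3d2() does
the same but stages the input volume from host memory. Buffer names and sizes are hypothetical.

	float *d_in = NULL, *d_out = NULL;
	cudaMalloc((void **)&d_in, sx2 * sy2 * sz2 * sizeof(float));
	cudaMalloc((void **)&d_out, sx * sy * sz * sizeof(float));
	cudaMemcpy(d_in, h_img, sx2 * sy2 * sz2 * sizeof(float), cudaMemcpyHostToDevice);
	affinetrans3d1(d_out, tmx, d_in, sx, sy, sz, sx2, sy2, sz2);
	cudaFree(d_in); cudaFree(d_out);
*/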
float costfunc(float *x) {
if (dof9Flag) {
dof9tomatrix(affCoef, x, dofNum);
}
else {
p2matrix(affCoef, x);
}
float costValue = corrfunc(d_imgStatic, valueStatic, affCoef, sxStatic1, syStatic1, szStatic1, sxStatic2, syStatic2, szStatic2);
itNumStatic += 1;
return -costValue;
}
float costfunccpu(float *x) { // **** this function does not work correctly
if (dof9Flag) {
dof9tomatrix(affCoef, x, dofNum);
}
else {
p2matrix(affCoef, x);
}
double costValue = corrfunccpu2(h_s3D, h_t3D, affCoef, sxStatic1, syStatic1, szStatic1, sxStatic2, syStatic2, szStatic2);
itNumStatic += 1;
return (float)(-costValue / valueStatic);
}
extern "C"
float zncc0(float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz) {
return 0;
}
extern "C"
float zncc1(float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz) {
	// note: the contents of d_img1 and d_img2 are modified during the calculation
float znccValue = -2.0;
long long int totalSize = sx*sy*sz;
float *d_imgT = NULL;
cudaMalloc((void **)&d_imgT, totalSize * sizeof(float));
cudaCheckErrors("****GPU memory allocating fails... GPU out of memory !!!!*****\n");
double sumImg1 = 0, sumImg2 = 0, sumST = 0, sumSS = 0, sumTT = 0;
sumImg1 = sum3Dgpu(d_img1, sx, sy, sz);
sumImg2 = sum3Dgpu(d_img2, sx, sy, sz);
addvaluegpu(d_img1, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
addvaluegpu(d_img2, d_img2, -float(sumImg2) / float(totalSize), sx, sy, sz);
multi3Dgpu(d_imgT, d_img1, d_img2, sx, sy, sz);
sumST = sum3Dgpu(d_imgT, sx, sy, sz);
multi3Dgpu(d_imgT, d_img1, d_img1, sx, sy, sz);
sumTT = sum3Dgpu(d_imgT, sx, sy, sz);
multi3Dgpu(d_imgT, d_img2, d_img2, sx, sy, sz);
sumSS = sum3Dgpu(d_imgT, sx, sy, sz);
cudaFree(d_imgT);
float b = float(sqrt(sumTT*sumSS));
if (b != 0)
znccValue = sumST / b;
return znccValue;
}
extern "C"
float zncc2(float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz) {
// d_img1, d_img2 value change after calculation
float znccValue = -2.0;
long long int totalSize = sx*sy*sz;
double sumImg1 = 0, sumImg2 = 0, sumST = 0, sumSS = 0, sumTT = 0;
float *h_img1 = (float *)malloc(totalSize * sizeof(float));
sumImg1 = sum3Dgpu(d_img1, sx, sy, sz);
sumImg2 = sum3Dgpu(d_img2, sx, sy, sz);
addvaluegpu(d_img1, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
addvaluegpu(d_img2, d_img2, -float(sumImg2) / float(totalSize), sx, sy, sz);
cudaMemcpy(h_img1, d_img1, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
multi3Dgpu(d_img1, d_img1, d_img1, sx, sy, sz);
sumTT = sum3Dgpu(d_img1, sx, sy, sz);
cudaMemcpy(d_img1, h_img1, totalSize * sizeof(float), cudaMemcpyHostToDevice);
multi3Dgpu(d_img1, d_img1, d_img2, sx, sy, sz);
sumST = sum3Dgpu(d_img1, sx, sy, sz);
multi3Dgpu(d_img2, d_img2, d_img2, sx, sy, sz);
sumSS = sum3Dgpu(d_img2, sx, sy, sz);
free(h_img1);
float b = float(sqrt(sumTT*sumSS));
if (b != 0)
znccValue = sumST / b;
return znccValue;
}
extern "C"
extern "C"
int reg3d_phasor0(long long int *shiftXYZ, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz) {
return 0;
}
extern "C"
int reg3d_phasor1(long long int *shiftXYZ, float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz) {
int totalSize = sx * sy * sz;
int totalSizeSpectrum = sz * sy*(sx / 2 + 1); // in complex floating format
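// Phase correlation: the translational shift is estimated as the argmax of
//   IFFT( conj(F{img1}) .* F{img2} / |conj(F{img1}) .* F{img2}| ),
// and the circular (wrap-around) ambiguity of that peak is resolved further
// below by comparing the candidate shifts with ZNCC on overlapping crops.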
fComplex *d_Spectrum1 = NULL, *d_Spectrum2 = NULL;
cudaMalloc((void **)&d_Spectrum1, totalSizeSpectrum * sizeof(fComplex));
cudaMalloc((void **)&d_Spectrum2, totalSizeSpectrum * sizeof(fComplex));
cufftHandle
fftPlanFwd,
fftPlanInv;
cufftPlan3d(&fftPlanFwd, sz, sy, sx, CUFFT_R2C);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_img1, (cufftComplex *)d_Spectrum2);
conj3Dgpu(d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
cufftExecR2C(fftPlanFwd, (cufftReal *)d_img2, (cufftComplex *)d_Spectrum2);
// multiplication and normalization
multicomplexnorm3Dgpu(d_Spectrum2, d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
cufftDestroy(fftPlanFwd);
cufftPlan3d(&fftPlanInv, sz, sy, sx, CUFFT_C2R);
float *d_phasor1 = (float *)d_Spectrum1;
cufftExecC2R(fftPlanInv, (cufftComplex *)d_Spectrum2, (cufftReal *)d_phasor1);
cufftDestroy(fftPlanInv);
size_t corXYZ[3];
float *d_phasor2 = (float *)d_Spectrum2;
circshiftgpu(d_phasor2, d_phasor1, sx, sy, sz, round(sx / 2), round(sy / 2), round(sz / 2));
float peakValue = max3Dgpu(&corXYZ[0], d_phasor2, sx, sy, sz);
shiftXYZ[0] = corXYZ[0] - sx / 2;
shiftXYZ[1] = corXYZ[1] - sy / 2;
shiftXYZ[2] = corXYZ[2] - sz / 2;
cudaFree(d_Spectrum1);
cudaFree(d_Spectrum2);
// compare 8 cases based on cross-correlation
long long int shiftX = shiftXYZ[0];
long long int shiftY = shiftXYZ[1];
long long int shiftZ = shiftXYZ[2];
long long int xabs = abs(shiftX), yabs = abs(shiftY), zabs = abs(shiftZ);
long long int beta = 4; // threshold value: only if shift is more than 1/beta of the image size
if ((xabs >(sx /beta)) ||( yabs >(sy / beta)) || (zabs >(sz / beta))) {
float *d_imgT = NULL, *d_crop1 = NULL, *d_crop2 = NULL;
long long int sizex1, sizex2, sizey1, sizey2, sizez1, sizez2, sizex, sizey, sizez, sizeMaxCrop;
sizex1 = xabs * sy * sz; sizex2 = (sx - xabs) * sy * sz;
sizey1 = sx *yabs * sz; sizey2 = sx * (sy - yabs) * sz;
sizez1 = sx * sy * zabs; sizez2 = sx * sy * (sz - zabs);
sizex = (sizex1 > sizex2) ? sizex1 : sizex2;
sizey = (sizey1 > sizey2) ? sizey1 : sizey2;
sizez = (sizez1 > sizez2) ? sizez1 : sizez2;
sizeMaxCrop = (sizex > sizey) ? sizex : sizey;
sizeMaxCrop = (sizeMaxCrop > sizez) ? sizeMaxCrop : sizez;
cudaMalloc((void **)&d_imgT, totalSize * sizeof(float));
cudaMalloc((void **)&d_crop1, sizeMaxCrop * sizeof(float));
cudaMalloc((void **)&d_crop2, sizeMaxCrop * sizeof(float));
circshiftgpu(d_imgT, d_img2, sx, sy, sz, -shiftX, -shiftY, -shiftZ);
// encode the 8 cases as for loop
long long int imSizeCropx[2], imSizeCropy[2], imSizeCropz[2];
long long int imox[2], imoy[2], imoz[2];
// index 0 records original shifts, index 1 switches the shift to the opposite case.
imSizeCropx[0] = sx - xabs; imSizeCropx[1] = xabs;
if (shiftX > 0) {
imox[0] = 0; imox[1] = sx - xabs;
}
else {
imox[0] = xabs; imox[1] = 0;
}
imSizeCropy[0] = sy - yabs; imSizeCropy[1] = yabs;
if (shiftY > 0) {
imoy[0] = 0; imoy[1] = sy - yabs;
}
else {
imoy[0] = yabs; imoy[1] = 0;
}
imSizeCropz[0] = sz - zabs; imSizeCropz[1] = zabs;
if (shiftZ > 0) {
imoz[0] = 0; imoz[1] = sz - zabs;
}
else {
imoz[0] = zabs; imoz[1] = 0;
}
int indx = 0, indy = 0, indz = 0;
float ccMax = -3, ccNow = 0;
for (int i = 0; i < 2; i++) {
if (imSizeCropx[i] > (sx / beta)) {
for (int j = 0; j < 2; j++) {
if (imSizeCropy[j] > (sy / beta)) {
for (int k = 0; k < 2; k++) {
if (imSizeCropz[k] > (sz / beta)) {
cropgpu2(d_crop1, d_img1, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
cropgpu2(d_crop2, d_imgT, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
ccNow = zncc1(d_crop1, d_crop2, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k]);
if (ccMax < ccNow) {
ccMax = ccNow;
indx = i;
indy = j;
indz = k;
}
}
}
}
}
}
}
// if ind ==1, flip the coordinates
if (indx == 1) {
if (shiftX > 0)
shiftXYZ[0] = shiftX - sx;
else
shiftXYZ[0] = shiftX + sx;
}
if (indy == 1) {
if (shiftY > 0)
shiftXYZ[1] = shiftY - sy;
else
shiftXYZ[1] = shiftY + sy;
}
if (indz == 1) {
if (shiftZ > 0)
shiftXYZ[2] = shiftZ - sz;
else
shiftXYZ[2] = shiftZ + sz;
}
cudaFree(d_imgT);
cudaFree(d_crop1);
cudaFree(d_crop2);
}
return 0;
}
extern "C"
int reg3d_phasor2(long long int *shiftXYZ, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz) {
int totalSize = sx * sy * sz;
int totalSizeSpectrum = sz * sy*(sx / 2 + 1); // in complex floating format
fComplex *d_Spectrum1 = NULL, *d_Spectrum2 = NULL;
cudaMalloc((void **)&d_Spectrum1, totalSizeSpectrum * sizeof(fComplex));
cudaMalloc((void **)&d_Spectrum2, totalSizeSpectrum * sizeof(fComplex));
float *d_img = (float *)d_Spectrum1;
fComplex *h_Spectrum1 = (fComplex *)malloc(totalSizeSpectrum * sizeof(fComplex));
cufftHandle
fftPlanFwd,
fftPlanInv;
cufftPlan3d(&fftPlanFwd, sz, sy, sx, CUFFT_R2C);
cudaMemcpy(d_img, h_img1, totalSize * sizeof(float), cudaMemcpyHostToDevice);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_img, (cufftComplex *)d_Spectrum2);
conj3Dgpu(d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
cudaMemcpy(h_Spectrum1, d_Spectrum1, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyDeviceToHost);
cudaMemcpy(d_img, h_img2, totalSize * sizeof(float), cudaMemcpyHostToDevice);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_img, (cufftComplex *)d_Spectrum2);
// multiplication and normalization
cudaMemcpy(d_Spectrum1, h_Spectrum1, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyHostToDevice);
multicomplexnorm3Dgpu(d_Spectrum2, d_Spectrum1, d_Spectrum2, sz, sy, (sx / 2 + 1));
cufftDestroy(fftPlanFwd);
cufftPlan3d(&fftPlanInv, sz, sy, sx, CUFFT_C2R);
cufftExecC2R(fftPlanInv, (cufftComplex *)d_Spectrum2, (cufftReal *)d_img);
cufftDestroy(fftPlanInv);
size_t corXYZ[3];
float *d_phasor2 = (float *)d_Spectrum2;
circshiftgpu(d_phasor2, d_img, sx, sy, sz, round(sx / 2), round(sy / 2), round(sz / 2));
float peakValue = max3Dgpu(&corXYZ[0], d_phasor2, sx, sy, sz);
shiftXYZ[0] = corXYZ[0] - sx / 2;
shiftXYZ[1] = corXYZ[1] - sy / 2;
shiftXYZ[2] = corXYZ[2] - sz / 2;
cudaFree(d_Spectrum1);
cudaFree(d_Spectrum2);
// compare 8 cases based on cross-correlation
long long int shiftX = shiftXYZ[0];
long long int shiftY = shiftXYZ[1];
long long int shiftZ = shiftXYZ[2];
long long int xabs = abs(shiftX), yabs = abs(shiftY), zabs = abs(shiftZ);
long long int beta = 4; // threshold value: only if shift is more than 1/beta of the image size
if ((xabs >(sx / beta)) || (yabs >(sy / beta)) || (zabs >(sz / beta))) {
float *d_img1 = NULL, *d_imgT = NULL, *d_crop1 = NULL, *d_crop2 = NULL;
long long int sizex1, sizex2, sizey1, sizey2, sizez1, sizez2, sizex, sizey, sizez, sizeMaxCrop;
sizex1 = xabs * sy * sz; sizex2 = (sx - xabs) * sy * sz;
sizey1 = sx *yabs * sz; sizey2 = sx * (sy - yabs) * sz;
sizez1 = sx * sy * zabs; sizez2 = sx * sy * (sz - zabs);
sizex = (sizex1 > sizex2) ? sizex1 : sizex2;
sizey = (sizey1 > sizey2) ? sizey1 : sizey2;
sizez = (sizez1 > sizez2) ? sizez1 : sizez2;
sizeMaxCrop = (sizex > sizey) ? sizex : sizey;
sizeMaxCrop = (sizeMaxCrop > sizez) ? sizeMaxCrop : sizez;
cudaMalloc((void **)&d_img1, totalSize * sizeof(float));
cudaMalloc((void **)&d_imgT, totalSize * sizeof(float));
cudaMalloc((void **)&d_crop1, sizeMaxCrop * sizeof(float));
cudaMalloc((void **)&d_crop2, sizeMaxCrop * sizeof(float));
cudaMemcpy(d_img1, h_img2, totalSize * sizeof(float), cudaMemcpyHostToDevice);
circshiftgpu(d_imgT, d_img1, sx, sy, sz, -shiftX, -shiftY, -shiftZ);
cudaMemcpy(d_img1, h_img1, totalSize * sizeof(float), cudaMemcpyHostToDevice);
// encode the 8 cases as for loop
long long int imSizeCropx[2], imSizeCropy[2], imSizeCropz[2];
long long int imox[2], imoy[2], imoz[2];
// index 0 records original shifts, index 1 switches the shift to the opposite case.
imSizeCropx[0] = sx - xabs; imSizeCropx[1] = xabs;
if (shiftX > 0) {
imox[0] = 0; imox[1] = sx - xabs;
}
else {
imox[0] = xabs; imox[1] = 0;
}
imSizeCropy[0] = sy - yabs; imSizeCropy[1] = yabs;
if (shiftY > 0) {
imoy[0] = 0; imoy[1] = sy - yabs;
}
else {
imoy[0] = yabs; imoy[1] = 0;
}
imSizeCropz[0] = sz - zabs; imSizeCropz[1] = zabs;
if (shiftZ > 0) {
imoz[0] = 0; imoz[1] = sz - zabs;
}
else {
imoz[0] = zabs; imoz[1] = 0;
}
int indx = 0, indy = 0, indz = 0;
float ccMax = -3, ccNow = 0;
for (int i = 0; i < 2; i++) {
if (imSizeCropx[i] >(sx / beta)) {
for (int j = 0; j < 2; j++) {
if (imSizeCropy[j] >(sy / beta)) {
for (int k = 0; k < 2; k++) {
if (imSizeCropz[k] >(sz / beta)) {
cropgpu2(d_crop1, d_img1, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
cropgpu2(d_crop2, d_imgT, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k], sx, sy, sz, imox[i], imoy[j], imoz[k]);
ccNow = zncc1(d_crop1, d_crop2, imSizeCropx[i], imSizeCropy[j], imSizeCropz[k]);
if (ccMax < ccNow) {
ccMax = ccNow;
indx = i;
indy = j;
indz = k;
}
}
}
}
}
}
}
// if ind ==1, flip the coordinates
if (indx == 1) {
if (shiftX > 0)
shiftXYZ[0] = shiftX - sx;
else
shiftXYZ[0] = shiftX + sx;
}
if (indy == 1) {
if (shiftY > 0)
shiftXYZ[1] = shiftY - sy;
else
shiftXYZ[1] = shiftY + sy;
}
if (indz == 1) {
if (shiftZ > 0)
shiftXYZ[2] = shiftZ - sz;
else
shiftXYZ[2] = shiftZ + sz;
}
cudaFree(d_img1);
cudaFree(d_imgT);
cudaFree(d_crop1);
cudaFree(d_crop2);
}
return 0;
}
extern "C"
int reg3d_affine0(float *h_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz,
int affMethod, bool flagTmx, float FTOL, int itLimit, bool verbose, float *records) {
return 0;
}
extern "C"
int reg3d_affine1(float *d_reg, float *iTmx, float *d_img1, float *d_img2, long long int sx, long long int sy, long long int sz,
int affMethod, bool flagTmx, float FTOL, int itLimit, bool verbose, float *records) {
// **** affine registration when GPU memory is sufficient: 3 images + 1 cuda array ***
/*
*** affine registration method:
0: no registration, transform d_img2 based on input matrix;
1: translation only;
2: rigid body;
3: 7 degrees of freedom (translation, rotation, scaling equally in 3 dimensions)
4: 9 degrees of freedom(translation, rotation, scaling);
5: 12 degrees of freedom;
6: rigid body first, then do 12 degrees of freedom;
7: 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
*** flagTmx:
true: use iTmx as input matrix;
false: default;
*** records: 8 element array
[1] -[3]: initial ZNCC (zero-normalized cross-correlation, negative of the cost function), intermediate ZNCC, optimized ZNCC;
[4] -[7]: single sub iteration time (in ms), total number of sub iterations, total iteration time (in s), whole registration time (in s);
*/
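// A minimal call sketch (hypothetical parameter values, for illustration only),
// assuming d_reg, d_img1, d_img2 are device buffers of sx*sy*sz floats and NDIM == 12
// (row-major 3x4 affine matrix):
//   float tmx[12]; // receives the optimized transformation matrix on return (flagTmx = false)
//   float rec[8] = {0};
//   reg3d_affine1(d_reg, tmx, d_img1, d_img2, sx, sy, sz,
//                 7 /*3->6->9->12 DOF*/, false, 0.0001f, 3000, true, rec);
//   // rec[3]: optimized ZNCC; rec[7]: total registration time in seconds.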
// ************get basic input images information ******************
// image size
sxStatic1 = sx; syStatic1 = sy; szStatic1 = sz;
sxStatic2 = sx; syStatic2 = sy; szStatic2 = sz;
// total pixel count for each image
long long int totalSize = sx*sy*sz;
// ****************** Processing Starts*****************//
// variables for memory and time cost records
clock_t ctime0, ctime1, ctime2, ctime3, ctime4;
ctime0 = clock();
// *** no registration
if (affMethod == 0) {
if (flagTmx)
(void)affinetrans3d1(d_reg, iTmx, d_img2, sx, sy, sz, sx, sy, sz);
else {
cudaMemcpy(d_reg, d_img2, totalSize * sizeof(float), cudaMemcpyDeviceToDevice);
for (int j = 0; j < NDIM; j++) iTmx[j] = 0;
iTmx[0] = iTmx[5] = iTmx[10] = 1;
}
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...no registration performed!\n");
}
return 0;
}
// *** registration
// for powell searching
affCoef = (float *)malloc((NDIM) * sizeof(float));
float *affCoefInitial = (float *)malloc((NDIM) * sizeof(float));
static float *p = (float *)malloc((NDIM + 1) * sizeof(float));
int iter;
float fret, **xi;
xi = matrix(1, NDIM, 1, NDIM);
for (int i = 1; i <= NDIM; i++)
for (int j = 1; j <= NDIM; j++)
xi[i][j] = (i == j ? 1.0 : 0.0);
for (int j = 0; j < NDIM; j++) affCoefInitial[j] = 0;
affCoefInitial[0] = 1;
affCoefInitial[5] = 1;
affCoefInitial[10] = 1;
float *affCoefTemp = (float *)malloc((NDIM) * sizeof(float));
float **xi_dof9;
static float *p_dof9 = (float *)malloc((10) * sizeof(float));
xi_dof9 = matrix(1, 9, 1, 9);
// **** allocate memory for the images:
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaArray *d_Array;
// *****************************************************
// ************** Start processing ******************
double
sumImg1 = 0,
sumImg2 = 0,
sumSqr1 = 0;
// ****** the definition of 12 DOF coefficients is totally different from that of 3 DOF, 6 DOF, 7 DOF or 9 DOF;
// if related to 3 DOF, 6 DOF, 7 DOF or 9 DOF (i.e. affMethod = 1, 2, 3, 4, 6, 7)
// then perform initial affine transformation based on input matrix
// *initialize transformation matrix
if (flagTmx) {
if (affMethod == 5) {
// use input matrix as initialization if inputTmx is true
memcpy(affCoefInitial, iTmx, NDIM * sizeof(float));
}
else {
// make affine transformation
(void)affinetrans3d1(d_reg, iTmx, d_img2, sx, sy, sz, sx, sy, sz);
}
}
if(affMethod != 5) {
xi_dof9 = matrix(1, 9, 1, 9);
for (int i = 1; i <= 9; i++)
for (int j = 1; j <= 9; j++)
xi_dof9[i][j] = (i == j ? 1.0 : 0.0);
p_dof9[0] = 0;
p_dof9[1] = 0; p_dof9[2] = 0; p_dof9[3] = 0;
p_dof9[4] = 0; p_dof9[5] = 0; p_dof9[6] = 0;
p_dof9[7] = 1; p_dof9[8] = 1; p_dof9[9] = 1;
}
// *** preprocess source image
if ((flagTmx)&&(affMethod != 5)) { // based on transformed image
sumImg2 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
else {//based on input d_img2
sumImg2 = sum3Dgpu(d_img2, sx, sy, sz);
addvaluegpu(d_reg, d_img2, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
// transfer source image into GPU array (later converted to texture memory)
cudaMalloc3DArray(&d_Array, &channelDesc, make_cudaExtent(sx, sy, sz));
cudaCheckErrors("****GPU memory allocating fails... GPU out of memory !!!!*****\n");
cudacopydevicetoarray(d_Array, channelDesc, d_reg, sx, sy, sz);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 2 is zero, empty image input or empty image after initial transformation **** \n");
exit(1);
}
// *** preprocess target image
sumImg1 = sum3Dgpu(d_img1, sx, sy, sz);
addvaluegpu(d_reg, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 1 is zero, empty image input **** \n");
exit(1);
}
addvaluegpu(d_reg, d_img1, -float(sumImg1) / float(totalSize), sx, sy, sz);
cudaCheckErrors("****Image preprocessing fails...");
// *** 3D registration begins
// Create 3D texture for source image
BindTexture(d_Array, channelDesc);
// make target image as static
d_imgStatic = d_reg;
// calculate initial cost function value and time cost for each sub iteration
ctime1 = clock();
dof9Flag = false;
matrix2p(affCoefInitial, p);
ctime2 = clock();
records[1] = -costfunc(p);
records[4] = (float)(ctime2 - ctime1);
if (verbose) {
printf("...initial cross correlation value: %f;\n", records[1]);
printf("...time cost for single sub iteration: %f ms;\n", records[4]);
}
itNumStatic = 0;
switch (affMethod) {
case 1:
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 2:
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 3:
dof9Flag = true;
dofNum = 7;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 4:
dof9Flag = true;
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 5:
dof9Flag = false;
dofNum = 12;
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 6:
// do 6 DOF --> 12 DOF
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 7:
// do 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 3 DOF: %f;\n", -fret);
}
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, 0.005, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 9 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
default:
printf("\n ****Wrong affine registration method is setup, no registraiton performed !!! **** \n");
}
if ((flagTmx) && (affMethod != 5)) {
matrixmultiply(affCoefTemp, iTmx, affCoef); //final transformation matrix
memcpy(affCoef, affCoefTemp, NDIM * sizeof(float));
}
UnbindTexture();
memcpy(iTmx, affCoef, NDIM * sizeof(float));
ctime3 = clock();
records[3] = -fret; // negative of the minimized cost function value
records[5] = (float)itNumStatic;
records[6] = (float)(ctime3 - ctime2) / CLOCKS_PER_SEC;
if (verbose) {
printf("...optimized cross correlation value: %f;\n", records[3]);
printf("...total sub iteration number: %d;\n", int(records[5]));
printf("...time cost for all iterations: %f s;\n", records[6]);
}
// ****Perform affine transformation with optimized coefficients****//
cudacopydevicetoarray(d_Array, channelDesc, d_img2, sx, sy, sz);
BindTexture(d_Array, channelDesc);
CopyTranMatrix(affCoef, NDIM * sizeof(float));
affineTransform(d_reg, sx, sy, sz, sx, sy, sz);
UnbindTexture();
free(affCoefTemp);
free(p_dof9);
free_matrix(xi_dof9, 1, 9, 1, 9);
free(affCoef);
free(affCoefInitial);
free(p);
free_matrix(xi, 1, NDIM, 1, NDIM);
//free GPU variables
cudaFreeArray(d_Array);
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...time cost for registration: %f s;\n", records[7]);
}
return 0;
}
extern "C"
int reg3d_affine2(float *d_reg, float *iTmx, float *h_img1, float *h_img2, long long int sx, long long int sy, long long int sz,
int affMethod, bool flagTmx, float FTOL, int itLimit, bool verbose, float *records) {
// **** affine registration when GPU memory is insufficient: 1 image + 1 cuda array ***
/*
*** affine registration method:
0: no registration, transform d_img2 based on input matrix;
1: translation only;
2: rigid body;
3: 7 degrees of freedom (translation, rotation, scaling equally in 3 dimensions)
4: 9 degrees of freedom(translation, rotation, scaling);
5: 12 degrees of freedom;
6: rigid body first, then do 12 degrees of freedom;
7: 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
*** flagTmx:
true: use iTmx as input matrix;
false: default;
*** records: 8 element array
[1] -[3]: initial ZNCC (zero-normalized cross-correlation, negative of the cost function), intermediate ZNCC, optimized ZNCC;
[4] -[7]: single sub iteration time (in ms), total number of sub iterations, total iteration time (in s), whole registration time (in s);
*/
// ************get basic input images information ******************
// image size
sxStatic1 = sx; syStatic1 = sy; szStatic1 = sz;
sxStatic2 = sx; syStatic2 = sy; szStatic2 = sz;
// total pixel count for each image
long long int totalSize = sx*sy*sz;
// ****************** Processing Starts*****************//
// variables for memory and time cost records
clock_t ctime0, ctime1, ctime2, ctime3, ctime4;
ctime0 = clock();
// *** no registration
if (affMethod == 0) {
if (flagTmx)
(void)affinetrans3d2(d_reg, iTmx, h_img2, sx, sy, sz, sx, sy, sz);
else {
cudaMemcpy(d_reg, h_img2, totalSize * sizeof(float), cudaMemcpyHostToDevice);
for (int j = 0; j < NDIM; j++) iTmx[j] = 0;
iTmx[0] = iTmx[5] = iTmx[10] = 1;
}
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...no registration performed!\n");
}
return 0;
}
// *** registration
// for powell searching
affCoef = (float *)malloc((NDIM) * sizeof(float));
float *affCoefInitial = (float *)malloc((NDIM) * sizeof(float));
static float *p = (float *)malloc((NDIM + 1) * sizeof(float));
int iter;
float fret, **xi;
xi = matrix(1, NDIM, 1, NDIM);
for (int i = 1; i <= NDIM; i++)
for (int j = 1; j <= NDIM; j++)
xi[i][j] = (i == j ? 1.0 : 0.0);
for (int j = 0; j < NDIM; j++) affCoefInitial[j] = 0;
affCoefInitial[0] = 1;
affCoefInitial[5] = 1;
affCoefInitial[10] = 1;
float *affCoefTemp = (float *)malloc((NDIM) * sizeof(float));
float **xi_dof9;
static float *p_dof9 = (float *)malloc((10) * sizeof(float));
xi_dof9 = matrix(1, 9, 1, 9);
// **** allocate memory for the images:
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaArray *d_Array;
float *h_imgTemp = (float *)malloc(totalSize * sizeof(float));
// *****************************************************
// ************** Start processing ******************
double
sumImg1 = 0,
sumImg2 = 0,
sumSqr1 = 0;
// ****** the definition of 12 DOF coefficients is totally different from that of 3 DOF, 6 DOF, 7 DOF or 9 DOF;
// if related to 3 DOF, 6 DOF, 7 DOF or 9 DOF (i.e. affMethod = 1, 2, 3, 4, 6, 7)
// then perform initial affine transformation based on input matrix
// *initialize transformation matrix
if (flagTmx) {
if (affMethod == 5) {
// use input matrix as initialization if inputTmx is true
memcpy(affCoefInitial, iTmx, NDIM * sizeof(float));
}
else {
// make affine transformation
(void)affinetrans3d2(d_reg, iTmx, h_img2, sx, sy, sz, sx, sy, sz);
}
}
if (affMethod != 5) {
xi_dof9 = matrix(1, 9, 1, 9);
for (int i = 1; i <= 9; i++)
for (int j = 1; j <= 9; j++)
xi_dof9[i][j] = (i == j ? 1.0 : 0.0);
p_dof9[0] = 0;
p_dof9[1] = 0; p_dof9[2] = 0; p_dof9[3] = 0;
p_dof9[4] = 0; p_dof9[5] = 0; p_dof9[6] = 0;
p_dof9[7] = 1; p_dof9[8] = 1; p_dof9[9] = 1;
}
// *** preprocess source image
if ((flagTmx) && (affMethod != 5)) { // based on transformed image
sumImg2 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
else {//based on input d_img2
cudaMemcpy(d_reg, h_img2, totalSize * sizeof(float), cudaMemcpyHostToDevice);
sumImg2 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg2) / float(totalSize), sx, sy, sz);
}
// transfer source image into GPU array (later converted to texture memory)
cudaMalloc3DArray(&d_Array, &channelDesc, make_cudaExtent(sx, sy, sz));
cudaCheckErrors("****GPU memory allocating fails... GPU out of memory !!!!*****\n");
cudacopydevicetoarray(d_Array, channelDesc, d_reg, sx, sy, sz);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 2 is zero, empty image input or empty image after initial transformation **** \n");
exit(1);
}
// *** preprocess target image
cudaMemcpy(d_reg, h_img1, totalSize * sizeof(float), cudaMemcpyHostToDevice);
sumImg1 = sum3Dgpu(d_reg, sx, sy, sz);
addvaluegpu(d_reg, d_reg, -float(sumImg1) / float(totalSize), sx, sy, sz);
cudaMemcpy(h_imgTemp, d_reg, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
multi3Dgpu(d_reg, d_reg, d_reg, sx, sy, sz);
sumSqr1 = sum3Dgpu(d_reg, sx, sy, sz);
valueStatic = sqrt(sumSqr1);
if (valueStatic == 0) {
fprintf(stderr, "*** SD of image 1 is zero, empty image input **** \n");
exit(1);
}
cudaMemcpy(d_reg, h_imgTemp, totalSize * sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("****Image preprocessing fails...");
// *** 3D registration begins
// Create 3D texture for source image
BindTexture(d_Array, channelDesc);
// make target image as static
d_imgStatic = d_reg;
// calculate initial cost function value and time cost for each sub iteration
ctime1 = clock();
dof9Flag = false;
matrix2p(affCoefInitial, p);
ctime2 = clock();
records[1] = -costfunc(p);
records[4] = (float)(ctime2 - ctime1);
if (verbose) {
printf("...initial cross correlation value: %f;\n", records[1]);
printf("...time cost for single sub iteration: %f ms;\n", records[4]);
}
itNumStatic = 0;
switch (affMethod) {
case 1:
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 2:
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 3:
dof9Flag = true;
dofNum = 7;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 4:
dof9Flag = true;
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 5:
dof9Flag = false;
dofNum = 12;
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 6:
// do 6 DOF --> 12 DOF
dof9Flag = true;
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
case 7:
// do 3 DOF --> 6 DOF --> 9 DOF --> 12 DOF
dof9Flag = true;
dofNum = 3;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 3 DOF: %f;\n", -fret);
}
dofNum = 6;
powell(p_dof9, xi_dof9, dofNum, 0.01, &iter, &fret, costfunc, &itNumStatic, itLimit);
if (verbose) {
printf("...cross correlation value after 6 DOF: %f;\n", -fret);
}
dofNum = 9;
powell(p_dof9, xi_dof9, dofNum, 0.005, &iter, &fret, costfunc, &itNumStatic, itLimit);
records[2] = -fret;
if (verbose) {
printf("...cross correlation value after 9 DOF: %f;\n", -fret);
}
// do DOF 12 registration
dof9Flag = false;
dofNum = 12;
matrix2p(affCoef, p);
powell(p, xi, dofNum, FTOL, &iter, &fret, costfunc, &itNumStatic, itLimit);
break;
default:
printf("\n ****Wrong affine registration method is setup, no registraiton performed !!! **** \n");
}
if ((flagTmx) && (affMethod != 5)) {
matrixmultiply(affCoefTemp, iTmx, affCoef); //final transformation matrix
memcpy(affCoef, affCoefTemp, NDIM * sizeof(float));
}
UnbindTexture();
memcpy(iTmx, affCoef, NDIM * sizeof(float));
ctime3 = clock();
records[3] = -fret; // negative of the minimized cost function value
records[5] = (float)itNumStatic;
records[6] = (float)(ctime3 - ctime2) / CLOCKS_PER_SEC;
if (verbose) {
printf("...optimized cross correlation value: %f;\n", records[3]);
printf("...total sub iteration number: %d;\n", int(records[5]));
printf("...time cost for all iterations: %f s;\n", records[6]);
}
// ****Perform affine transformation with optimized coefficients****//
cudacopyhosttoarray(d_Array, channelDesc, h_img2, sx, sy, sz);
BindTexture(d_Array, channelDesc);
CopyTranMatrix(affCoef, NDIM * sizeof(float));
affineTransform(d_reg, sx, sy, sz, sx, sy, sz);
UnbindTexture();
free(h_imgTemp);
free(affCoefTemp);
free(p_dof9);
free_matrix(xi_dof9, 1, 9, 1, 9);
free(affCoef);
free(affCoefInitial);
free(p);
free_matrix(xi, 1, NDIM, 1, NDIM);
//free GPU variables
cudaFreeArray(d_Array);
ctime4 = clock();
records[7] = (float)(ctime4 - ctime0) / CLOCKS_PER_SEC;
if (verbose) {
printf("...time cost for registration: %f s;\n", records[7]);
}
return 0;
}
// Deconvolution
extern "C"
void genOTFgpu(fComplex *d_odata, float *d_idata, long long int sx, long long int sy, long long int sz, long long int sx2,
long long int sy2, long long int sz2, bool normFlag) {
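// Builds an OTF from a real-space PSF: optionally normalizes the PSF to unit
// sum, resizes/pads it onto the sx x sy x sz FFT grid (padPSFgpu presumably
// wraps the PSF center to the array origin), then takes the R2C FFT into d_odata.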
long long int totalSizeIn = sx2 * sy2 * sz2;
long long int totalSizeOut = sx * sy * sz;
long long int totalSizeMax = (totalSizeIn > totalSizeOut)?totalSizeIn:totalSizeOut;
float *d_temp = NULL;
cudaStatus = cudaMalloc((void **)&d_temp, totalSizeMax * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: GPU memory allocating error when calculating OTF \n");
exit(1);
}
if (normFlag) {
double sumValue = sum3Dgpu(d_idata, sx2, sy2, sz2);
multivaluegpu(d_temp, d_idata, (float)(1 / sumValue), sx2, sy2, sz2);
}
else
cudaMemcpy(d_temp, d_idata, totalSizeIn * sizeof(float), cudaMemcpyDeviceToDevice);
if ((sx<sx2) || (sy<sy2) || (sz<sz2)) {
alignsize3Dgpu((float *)d_odata, d_temp, sx, sy, sz, sx2, sy2, sz2);
padPSFgpu(d_temp, (float *)d_odata, sx, sy, sz, sx, sy, sz);
}
else {
padPSFgpu((float *)d_odata, d_temp, sx, sy, sz, sx2, sy2, sz2);
cudaMemcpy(d_temp, d_odata, totalSizeOut * sizeof(float), cudaMemcpyDeviceToDevice);
}
cufftHandle
fftPlanFwd;
cufftPlan3d(&fftPlanFwd, sx, sy, sz, CUFFT_R2C);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_temp, (cufftComplex *)d_odata);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error when calculating OTF \n");
exit(1);
}
cudaFree(d_temp);
cufftDestroy(fftPlanFwd);
}
extern "C"
int decon_singleview_OTF0(float *h_decon, float *h_img, fftwf_complex *h_OTF, fftwf_complex *h_OTF_bp,
long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** single view deconvolution with OTF interface on CPU ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
clock_t start, end;
start = clock();
float *h_StackA = h_img, *h_StackE = h_decon;
float *h_StackT = (float *)malloc(totalSize * sizeof(float));
fftwf_complex *h_StackESpectrum = (fftwf_complex *)malloc(totalSizeSpectrum * sizeof(fftwf_complex));
// initialize estimation
maxvaluecpu(h_StackA, h_StackA, (float)(SMALLVALUE), totalSize);
// initialize estimation
if (flagConstInitial) { // use constant mean value as initial
float meanValue = (float)sumcpu(h_StackA, totalSize);
memset(h_StackE, 0, totalSize * sizeof(float));
addvaluecpu(h_StackE, h_StackE, meanValue, totalSize);
}
else { // use measured images as initial
memcpy(h_StackE, h_StackA, totalSize * sizeof(float));
}
fftwf_plan stackE2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackE, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan stackT2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackT, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan spectrum2StackT = fftwf_plan_dft_c2r_3d(sx, sy, sz, h_StackESpectrum, h_StackT, FFTW_MEASURE);
printf("...Start CPU Decon\n");
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
fftwf_execute(stackE2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
divcpu(h_StackT, h_StackA, h_StackT, totalSize);
fftwf_execute(stackT2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF_bp, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
multicpu(h_StackE, h_StackE, h_StackT, totalSize);//
}
free(h_StackT);
free(h_StackESpectrum);
fftwf_destroy_plan(stackE2Spectrum);
fftwf_destroy_plan(stackT2Spectrum);
fftwf_destroy_plan(spectrum2StackT);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_singleview_OTF1(float *d_decon, float *d_img, fComplex *d_OTF, fComplex *d_OTF_bp,
long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** single view deconvolution with OTF interface when GPU memory is sufficient ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
size_t freeMem = 0, totalMem = 0;
cufftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *d_StackA = d_img, *d_StackE = d_decon;
float *d_StackT = NULL;
fComplex *d_StackESpectrum = NULL;
cudaMalloc((void **)&d_StackT, totalSize * sizeof(float));
cudaMalloc((void **)&d_StackESpectrum, totalSizeSpectrum * sizeof(fComplex));
// initialize estimation
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
if(flagConstInitial) {// use constant mean value as initial
float meanValue = (float)sum3Dgpu(d_StackA, sx, sy, sz);
cudaMemset(d_StackE, 0, totalSize * sizeof(float));
addvaluegpu(d_StackE, d_StackE, meanValue, sx, sy, sz);
}
else { // use measured image as initial
cudaMemcpy(d_StackE, d_StackA, totalSize * sizeof(float), cudaMemcpyDeviceToDevice);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
// Create FFT plans
cufftPlan3d(&fftPlanFwd, sx, sy, sz, CUFFT_R2C);
cufftPlan3d(&fftPlanInv, sx, sy, sz, CUFFT_C2R);
cudaMemGetInfo(&freeMem, &totalMem);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
// forward
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackE, (cufftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackT);
div3Dgpu(d_StackT, d_StackA, d_StackT, sx, sy, sz);
// backward
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackT, (cufftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackT);
multi3Dgpu(d_StackE, d_StackE, d_StackT, sx, sy, sz);
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz); // eliminate possible negative values
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
cudaFree(d_StackT); cudaFree(d_StackESpectrum);
cufftDestroy(fftPlanFwd);
cufftDestroy(fftPlanInv);
cudaMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_singleview_OTF2(float *d_decon, float *d_img, fComplex *h_OTF, fComplex *h_OTF_bp,
long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** single view deconvolution with OTF interface when GPU memory is insufficient: 2 images + 2 fftPlans ***
// **** d_decon and d_img should have total size: sx * sy*(sz / 2 + 1) * sizeof(float) to store image spectrum
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
// *****
size_t freeMem = 0, totalMem = 0;
cufftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *h_StackA = NULL, *h_StackE = NULL;
h_StackA = (float *)malloc(totalSize * sizeof(float));
h_StackE = (float *)malloc(totalSize * sizeof(float));
float *d_StackA = d_img, *d_StackE = d_decon;
fComplex *d_OTF = NULL, *d_OTF_bp = NULL, *d_StackESpectrum = NULL;
// initialize estimation
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
cudaMemcpy(h_StackA, d_StackA, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
//if (initialFlag) // use measured image as initial
if (flagConstInitial) { // use constant mean value as initial
float meanValue = (float)sum3Dgpu(d_StackA, sx, sy, sz);
cudaMemset(d_StackA, 0, totalSize * sizeof(float));
addvaluegpu(d_StackA, d_StackA, meanValue, sx, sy, sz);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
cudaMemcpy(h_StackE, d_StackA, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
d_OTF = (fComplex *)d_StackA; // share the same physical memory
d_OTF_bp = (fComplex *)d_StackA; // share the same physical memory
d_StackESpectrum = (fComplex *)d_StackE; // share the same physical memory
// Create FFT plans
cufftPlan3d(&fftPlanFwd, sx, sy, sz, CUFFT_R2C);
cufftPlan3d(&fftPlanInv, sx, sy, sz, CUFFT_C2R);
cudaMemGetInfo(&freeMem, &totalMem);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
// forward
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackA, (cufftComplex *)d_StackESpectrum);
cudaMemcpy(d_OTF, h_OTF, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackA);
cudaMemcpy(d_StackE, h_StackA, totalSize * sizeof(float), cudaMemcpyHostToDevice);
div3Dgpu(d_StackA, d_StackE, d_StackA, sx, sy, sz);
// backward
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackA, (cufftComplex *)d_StackESpectrum);
cudaMemcpy(d_OTF_bp, h_OTF_bp, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackA);
cudaMemcpy(d_StackE, h_StackE, totalSize * sizeof(float), cudaMemcpyHostToDevice);
multi3Dgpu(d_StackA, d_StackE, d_StackA, sx, sy, sz);
maxvalue3Dgpu(d_StackA, d_StackA, float(SMALLVALUE), sx, sy, sz); // eliminate possible negative values
cudaMemcpy(h_StackE, d_StackA, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
}
cudaMemcpy(d_StackE, d_StackA, totalSize * sizeof(float), cudaMemcpyDeviceToDevice);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
free(h_StackA); free(h_StackE);
cufftDestroy(fftPlanFwd);
cufftDestroy(fftPlanInv);
cudaMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_dualview_OTF0(float *h_decon, float *h_img1, float *h_img2, fftwf_complex *h_OTF1, fftwf_complex *h_OTF2, fftwf_complex *h_OTF_bp1,
fftwf_complex *h_OTF_bp2, long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** dual-view deconvolution with OTF interface on CPU ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
clock_t start, end;
start = clock();
float *h_StackA = h_img1, *h_StackB = h_img2, *h_StackE = h_decon;
float *h_StackT = (float *)malloc(totalSize * sizeof(float));
fftwf_complex *h_StackESpectrum = (fftwf_complex *)malloc(totalSizeSpectrum * sizeof(fftwf_complex));
// initialize estimation
maxvaluecpu(h_StackA, h_StackA, (float)(SMALLVALUE), totalSize);
maxvaluecpu(h_StackB, h_StackB, (float)(SMALLVALUE), totalSize);
// initialize estimation
if (flagConstInitial) { // use constant mean value as initial
float meanValue1 = (float)sumcpu(h_StackA, totalSize);
float meanValue2 = (float)sumcpu(h_StackB, totalSize);
memset(h_StackE, 0, totalSize * sizeof(float));
addvaluecpu(h_StackE, h_StackE, (meanValue1 + meanValue2) / 2, totalSize);
}
else { // use measured images as initial
addcpu(h_StackE, h_StackA, h_StackB, totalSize);
multivaluecpu(h_StackE, h_StackE, (float)0.5, totalSize);
}
fftwf_plan stackE2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackE, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan stackT2Spectrum = fftwf_plan_dft_r2c_3d(sx, sy, sz, h_StackT, h_StackESpectrum, FFTW_MEASURE);
fftwf_plan spectrum2StackT = fftwf_plan_dft_c2r_3d(sx, sy, sz, h_StackESpectrum, h_StackT, FFTW_MEASURE);
printf("...Start CPU Decon\n");
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
fftwf_execute(stackE2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF1, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
//printf("here!\n");
divcpu(h_StackT, h_StackA, h_StackT, totalSize);
fftwf_execute(stackT2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF_bp1, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
multicpu(h_StackE, h_StackE, h_StackT, totalSize);//
fftwf_execute(stackE2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF2, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
divcpu(h_StackT, h_StackB, h_StackT, totalSize);
fftwf_execute(stackT2Spectrum);
multicomplexcpu((fComplex *)h_StackESpectrum, (fComplex *)h_StackESpectrum, (fComplex *)h_OTF_bp2, sx * sy * (sz / 2 + 1));
fftwf_execute(spectrum2StackT);
multicpu(h_StackE, h_StackE, h_StackT, totalSize);//
}
free(h_StackT);
free(h_StackESpectrum);
fftwf_destroy_plan(stackE2Spectrum);
fftwf_destroy_plan(stackT2Spectrum);
fftwf_destroy_plan(spectrum2StackT);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_dualview_OTF1(float *d_decon, float *d_img1, float *d_img2, fComplex *d_OTF1, fComplex *d_OTF2, fComplex *d_OTF_bp1,
fComplex *d_OTF_bp2, long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** dual-view deconvolution with OTF interface when GPU memory is sufficient: 9 images + 2 fftPlans ***
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
size_t freeMem = 0, totalMem = 0;
cufftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *d_StackA = d_img1, *d_StackB = d_img2, *d_StackE = d_decon;
float *d_StackT = NULL;
fComplex *d_StackESpectrum = NULL;
cudaMalloc((void **)&d_StackT, totalSize * sizeof(float));
cudaMalloc((void **)&d_StackESpectrum, totalSizeSpectrum * sizeof(fComplex));
// initialize estimation
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
maxvalue3Dgpu(d_StackB, d_StackB, (float)(SMALLVALUE), sx, sy, sz);
// initialize estimation
if (flagConstInitial) { // use constant mean value as initial
float meanValue1 = (float)sum3Dgpu(d_StackA, sx, sy, sz);
float meanValue2 = (float)sum3Dgpu(d_StackB, sx, sy, sz);
cudaMemset(d_StackE, 0, totalSize * sizeof(float));
addvaluegpu(d_StackE, d_StackE, (meanValue1 + meanValue2) / 2, sx, sy, sz);
}
else { // use measured images as initial
add3Dgpu(d_StackE, d_StackA, d_StackB, sx, sy, sz);
multivaluegpu(d_StackE, d_StackE, (float)0.5, sx, sy, sz);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
// Create FFT plans
cufftPlan3d(&fftPlanFwd, sx, sy, sz, CUFFT_R2C);
cufftPlan3d(&fftPlanInv, sx, sy, sz, CUFFT_C2R);
cudaMemGetInfo(&freeMem, &totalMem);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
// ### 1st view
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackE, (cufftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF1, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackT);
div3Dgpu(d_StackT, d_StackA, d_StackT, sx, sy, sz);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackT, (cufftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp1, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackT);
multi3Dgpu(d_StackE, d_StackE, d_StackT, sx, sy, sz);//
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
// ### 2nd view
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackE, (cufftComplex *)d_StackESpectrum);//
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF2, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackT);
div3Dgpu(d_StackT, d_StackB, d_StackT, sx, sy, sz);//
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackT, (cufftComplex *)d_StackESpectrum);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF_bp2, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackT);
multi3Dgpu(d_StackE, d_StackE, d_StackT, sx, sy, sz);
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
cudaFree(d_StackT); cudaFree(d_StackESpectrum);
cufftDestroy(fftPlanFwd);
cufftDestroy(fftPlanInv);
cudaMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
extern "C"
int decon_dualview_OTF2(float *d_decon, float *d_img1, float *h_img2, fComplex *h_OTF1, fComplex *h_OTF2, fComplex *h_OTF_bp1,
fComplex *h_OTF_bp2, long long int sx, long long int sy, long long int sz, int itNumForDecon, bool flagConstInitial) {
// **** dual-view deconvolution with OTF interface when GPU memory is insufficient: 2 images + 2 fftPlans ***
// **** d_decon and d_img should have total size: sx * sy*(sz / 2 + 1) * sizeof(float) to store image spectrum
// image size
long long int totalSize = sx*sy*sz; // in floating format
long long int totalSizeSpectrum = sx * sy*(sz / 2 + 1); // in complex floating format
// *****
size_t freeMem = 0, totalMem = 0;
cufftHandle
fftPlanFwd,
fftPlanInv;
clock_t start, end;
start = clock();
float *h_StackA = NULL, *h_StackB = NULL, *h_StackE = NULL;
h_StackA = (float *)malloc(totalSize * sizeof(float));
h_StackB = (float *)malloc(totalSize * sizeof(float));
h_StackE = (float *)malloc(totalSize * sizeof(float));
float *d_StackA = d_img1, *d_StackE = d_decon;
float *d_StackB = NULL;
fComplex *d_OTF = NULL, *d_StackESpectrum = NULL;
d_StackESpectrum = (fComplex *)d_StackA;
d_OTF = (fComplex *)d_StackE;
cudaStatus = cudaGetLastError();
// initialize estimation
cudaMalloc((void **)&d_StackB, totalSize * sizeof(float));
maxvalue3Dgpu(d_StackA, d_StackA, (float)(SMALLVALUE), sx, sy, sz);
cudaMemcpy(d_StackB, h_img2, totalSize * sizeof(float), cudaMemcpyHostToDevice);
maxvalue3Dgpu(d_StackB, d_StackB, (float)(SMALLVALUE), sx, sy, sz);
cudaMemcpy(h_StackA, d_StackA, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_StackB, d_StackB, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
if (flagConstInitial) { // use constant mean value as initial
float meanValue1 = (float)sum3Dgpu(d_StackA, sx, sy, sz);
float meanValue2 = (float)sum3Dgpu(d_StackB, sx, sy, sz);
cudaMemset(d_StackE, 0, totalSize * sizeof(float));
addvaluegpu(d_StackE, d_StackE, (meanValue1 + meanValue2) / 2, sx, sy, sz);
}
else { // use measured images as initial
add3Dgpu(d_StackE, d_StackA, d_StackB, sx, sy, sz);
multivaluegpu(d_StackE, d_StackE, (float)0.5, sx, sy, sz);
}
cudaMemcpy(h_StackE, d_StackE, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_StackB); // release temporary variable
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: initial image preparation failed \n");
exit(1);
}
// Create FFT plans
cufftPlan3d(&fftPlanFwd, sx, sy, sz, CUFFT_R2C);
cufftPlan3d(&fftPlanInv, sx, sy, sz, CUFFT_C2R);
cudaMemGetInfo(&freeMem, &totalMem);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: cufftPlan error \n");
exit(1);
}
printf("...GPU free memory (before decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
for (int itNum = 1; itNum <= itNumForDecon; itNum++) {
//printf("...Processing iteration %d\n", it);
// ### 1st view
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackE, (cufftComplex *)d_StackESpectrum);
cudaMemcpy(h_StackE, d_StackE, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(d_OTF, h_OTF1, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackE);
cudaMemcpy(d_StackA, h_StackA, totalSize * sizeof(float), cudaMemcpyHostToDevice);
div3Dgpu(d_StackE, d_StackA, d_StackE, sx, sy, sz);
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackE, (cufftComplex *)d_StackESpectrum);
cudaMemcpy(d_OTF, h_OTF_bp1, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackE);
cudaMemcpy(d_StackA, h_StackE, totalSize * sizeof(float), cudaMemcpyHostToDevice);
multi3Dgpu(d_StackE, d_StackE, d_StackA, sx, sy, sz);//
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
// ### 2nd view
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackE, (cufftComplex *)d_StackESpectrum);//
cudaMemcpy(h_StackE, d_StackE, totalSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(d_OTF, h_OTF2, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackE);
cudaMemcpy(d_StackA, h_StackB, totalSize * sizeof(float), cudaMemcpyHostToDevice);
div3Dgpu(d_StackE, d_StackA, d_StackE, sx, sy, sz);//
cufftExecR2C(fftPlanFwd, (cufftReal *)d_StackE, (cufftComplex *)d_StackESpectrum);
cudaMemcpy(d_OTF, h_OTF_bp2, totalSizeSpectrum * sizeof(fComplex), cudaMemcpyHostToDevice);
multicomplex3Dgpu(d_StackESpectrum, d_StackESpectrum, d_OTF, sx, sy, (sz / 2 + 1));
cufftExecC2R(fftPlanInv, (cufftComplex *)d_StackESpectrum, (cufftReal *)d_StackE);
cudaMemcpy(d_StackA, h_StackE, totalSize * sizeof(float), cudaMemcpyHostToDevice);
multi3Dgpu(d_StackE, d_StackE, d_StackA, sx, sy, sz);
maxvalue3Dgpu(d_StackE, d_StackE, float(SMALLVALUE), sx, sy, sz);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "*** FAILED - ABORTING: decon iterration error \n");
exit(1);
}
free(h_StackA); free(h_StackB); free(h_StackE);
cufftDestroy(fftPlanFwd);
cufftDestroy(fftPlanInv);
cudaMemGetInfo(&freeMem, &totalMem);
printf("...GPU free memory (after decon iteration) is %.0f MBites\n", (float)freeMem / 1048576.0f);
end = clock();
printf("...Time cost for decon is %2.3f s\n", (float)(end - start) / CLOCKS_PER_SEC);
return 0;
}
#undef SMALLVALUE
#undef NDIM
|
7eedd38e02c75e10ccefd99ebcaf0bfeec658921.hip | // !!! This is a file automatically generated by hipify!!!
/*
by Qin Yu, Apr 2019
*/
#include <algorithm> // sort, any_of.
#include <cassert> // assert.
#include <iostream> // cout, endl.
using namespace std;
#include "svm.h"
// Not used but may help:
// #include <stdio.h>
// #include <stdlib.h>
// #include <random>
// #include <ctime>
int main(int argc, char const *argv[]) {
cout << argc << endl;
for (size_t i = 0; i < argc; i++) {
cout << "argv[" << i << "] = " << argv[i] << endl;
}
if (argc != 4) {
cout << "Must have 1 integer argument for C and M, and float for ACCURACY" << endl;
return 1;
}
uint32_t C, M;
float ACCURACY;
sscanf(argv[1], "%d", &C);
sscanf(argv[2], "%d", &M);
sscanf(argv[3], "%f", &ACCURACY);
cout << "C = " << C << " and M = " << M << " with accuracy = " << ACCURACY << endl;
int class1_limit = 2;
int class2_limit = 2;
int number_of_SVMs = ((class1_limit - 1) * class2_limit) / 2;
cout << "Will train " << number_of_SVMs << " SVMs" << endl;
// Record trained SVMs:
float **all_alpha = new float *[45]();
uint8_t ***all_X = new uint8_t **[45]();
int **all_y = new int *[45]();
int *all_L = new int[45]();
double *time_all_pairs = new double[45]();
int pair_counter = 0;
for (size_t i = 0; i < class1_limit; i++) {
for (size_t j = 0; j < class2_limit; j++) {
if (i < j) {
cout << endl << "Starting training " << i << " vs " << j << endl;
time_all_pairs[pair_counter] = mnist_2_class_training(
i, j, pair_counter, C, M, ACCURACY, all_L, all_alpha, all_X, all_y);
if (time_all_pairs[pair_counter] < 0) {
cout << "Something Wrong when training SVM " << i << " vs " << j
<< endl;
return -1;
}
// cout << "Time spent on " << i << " vs " << j << " = " <<
// time_all_pairs[pair_counter] << endl;
pair_counter++;
}
}
}
cout << "---------------------------------------------" << endl;
pair_counter = 0;
double total_time = 0;
for (size_t i = 0; i < class1_limit; i++) {
for (size_t j = 0; j < class2_limit; j++) {
if (i < j) {
cout << "Time spent on " << i << " vs " << j << " = "
<< time_all_pairs[pair_counter] << endl;
total_time += time_all_pairs[pair_counter];
pair_counter++;
}
}
}
cout << "Total time spent on training all " << number_of_SVMs
<< "SVMs = " << total_time << endl;
// Load MNIST test data:
int mnist_loading_error = 0;
uint32_t magic_number_label_ts;
uint32_t number_of_labels_ts;
uint8_t *test_labels;
mnist_loading_error = read_MNIST_test_labels(
magic_number_label_ts, number_of_labels_ts, test_labels);
if (mnist_loading_error) return -1;
uint32_t magic_number_image_ts;
uint32_t number_of_images_ts, n_rows_ts, n_cols_ts, n_pixels_ts,
n_features_ts;
uint8_t **test_images;
mnist_loading_error = read_MNIST_test_images(
magic_number_image_ts, number_of_images_ts, n_rows_ts, n_cols_ts,
n_pixels_ts, n_features_ts, test_images);
if (mnist_loading_error) {
delete[] test_labels;
test_labels = NULL;
for (size_t i = 0; i < number_of_images_ts; i++) {
delete[] test_images[i];
test_images[i] = NULL;
}
delete[] test_images;
test_images = NULL;
return -1;
}
// Fraction Table:
float *B = new float[256 * 256];
for (int i = 0; i < 256; i++)
for (int j = 0; j < 256; j++)
B[i * 256 + j] = pow(i / 255.0f - j / 255.0f, 2);
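// B is a 256 x 256 lookup table of squared normalized pixel differences,
//   B[i * 256 + j] = ((i - j) / 255)^2,
// used by compute_kernel_matrix_ts to accumulate squared distances between
// test and training images (presumably for a Gaussian/RBF kernel) without
// repeating the per-pixel division.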
// Compute Testing Kernel Matrix:
int **all_y_hat = new int *[45];
int L_ts = number_of_images_ts;
uint8_t **X_ts = test_images;
uint8_t *y_ts = test_labels;
hipError_t cudaStatus;
int idx = 0;
for (size_t m = 0; m < class1_limit; m++) {
for (size_t n = 0; n < class2_limit; n++) {
if (m < n) {
int L = all_L[idx];
uint8_t **X = all_X[idx];
int *y = all_y[idx];
all_y_hat[idx] = new int[L_ts];
float *alpha = all_alpha[idx];
float *alpha_y = new float[L](); // alpha .* y
float *K_ts = new float[L_ts * L];
cudaStatus = compute_kernel_matrix_ts(785, L_ts, X_ts, L, X, K_ts, B);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "kernel_kernel_matrix launch failed: %s\n",
hipGetErrorString(cudaStatus));
}
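// Pairwise decision rule: sign( sum_i alpha_i * y_i * K(x_test, x_i) );
// a positive score votes for class m, otherwise the vote goes to class n.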
for (size_t i = 0; i < L; i++) alpha_y[i] = alpha[i] * y[i];
for (uint32_t i = 0; i < L_ts; i++) {
float temp_sigma = dot_product_float((K_ts + i * L), alpha_y, L);
int is_positive = (float(0) < temp_sigma) - (temp_sigma < float(0));
all_y_hat[idx][i] = (is_positive > 0) ? m : n;
}
delete[] alpha_y;
alpha_y = NULL;
delete[] K_ts;
K_ts = NULL;
idx++;
// cout << "no problem 1" << endl;
} // cout << "no problem 2" << endl;
} // cout << "no problem 3" << endl;
} // cout << "no problem 4" << endl;
cout << "L_ts = " << L_ts << endl;
int *final_pred_y = new int[L_ts];
int testing_error_number = 0;
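// Majority vote: every pairwise SVM votes for one of its two digits and the
// digit with the most votes becomes the final 10-class prediction.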
for (size_t i = 0; i < L_ts; i++) {
int count_vote[10] = {0};
for (size_t j = 0; j < number_of_SVMs; j++) {
count_vote[all_y_hat[j][i]] += 1;
}
int max_vote_number = 0;
int max_vote_index = 0;
for (size_t k = 0; k < 10; k++) {
if (count_vote[k] > max_vote_number) {
max_vote_number = count_vote[k];
max_vote_index = k;
}
}
final_pred_y[i] = max_vote_index;
if (final_pred_y[i] != y_ts[i]) testing_error_number++;
}
float testing_error_rate = float(testing_error_number) / L_ts;
cout << "Final Testing Accuracy 10 Classes = " << (1 - testing_error_rate) * 100
<< "%" << endl;
for (size_t i = 0; i < number_of_SVMs; i++) {
delete[] all_y_hat[i];
all_y_hat[i] = NULL;
}
delete[] all_y_hat;
all_y_hat = NULL;
for (size_t i = 0; i < number_of_SVMs; i++) {
delete[] all_y[i];
all_y[i] = NULL;
}
delete[] all_y;
all_y = NULL;
for (size_t i = 0; i < number_of_SVMs; i++) {
delete[] all_alpha[i];
all_alpha[i] = NULL;
}
delete[] all_alpha;
all_alpha = NULL;
for (size_t i = 0; i < number_of_SVMs; i++) {
for (size_t j = 0; j < all_L[i]; j++) {
delete[] all_X[i][j];
all_X[i][j] = NULL;
}
delete[] all_X[i];
all_X[i] = NULL;
}
delete[] all_X;
all_X = NULL;
delete[] all_L;
all_L = NULL;
delete[] time_all_pairs;
time_all_pairs = NULL;
delete[] test_labels;
test_labels = NULL;
for (size_t i = 0; i < number_of_images_ts; i++) {
delete[] test_images[i];
test_images[i] = NULL;
}
delete[] test_images;
test_images = NULL;
delete[] B;
B = NULL;
return 0;
}
double mnist_2_class_training(int class1, int class2, int pair_index,
uint32_t C, uint32_t M, float accuracy,
int *&all_L, float **&all_alpha,
uint8_t ***&all_X, int **&all_y) {
clock_t total_start_time = clock();
assert(sizeof(float) == 4);
// Load MNIST:
clock_t total_loading_time = clock();
int mnist_loading_error = 0;
uint32_t magic_number_label;
uint32_t number_of_labels;
uint8_t *train_labels;
mnist_loading_error = read_MNIST_train_labels(magic_number_label,
number_of_labels, train_labels);
if (mnist_loading_error) return -1;
uint32_t magic_number_image;
uint32_t number_of_images, n_rows, n_cols, n_pixels, n_features;
uint8_t **train_images;
mnist_loading_error =
read_MNIST_train_images(magic_number_image, number_of_images, n_rows,
n_cols, n_pixels, n_features, train_images);
if (mnist_loading_error) {
delete[] train_labels;
train_labels = NULL;
for (size_t i = 0; i < number_of_images; i++) {
delete[] train_images[i];
train_images[i] = NULL;
}
delete[] train_images;
train_images = NULL;
return -1;
}
clock_t time_finished_loading = clock();
cout << "load time = "
<< double(time_finished_loading - total_start_time) / CLOCKS_PER_SEC
<< endl;
// Extract 1v1 data:
uint8_t class_label_pos = uint8_t(class1), class_label_neg = uint8_t(class2);
uint32_t number_of_labels_pos = 0, number_of_labels_neg = 0,
number_of_data_1v1 = 0;
uint8_t **train_images_1v1;
int8_t *train_labels_1v1;
extract_train_data_1v1(class_label_pos, number_of_labels_pos, class_label_neg,
number_of_labels_neg, number_of_labels,
number_of_images, number_of_data_1v1, train_labels,
train_labels_1v1, train_images, train_images_1v1,
n_features, n_cols);
clock_t time_finished_extracting = clock();
cout << "extract time = "
<< double(time_finished_extracting - time_finished_loading) /
CLOCKS_PER_SEC
<< endl;
// Fraction Table:
float *K = new float[number_of_data_1v1 *
number_of_data_1v1]; // Must be heap-allocated with new: the L x L kernel matrix is far too large for the stack.
float *B = new float[256 * 256];
for (int i = 0; i < 256; i++)
for (int j = 0; j < 256; j++)
B[i * 256 + j] = pow(i / 255.0f - j / 255.0f, 2);
// Compute Kernel Matrix:
hipError_t cudaStatus;
clock_t time_started_kernelmatrix = clock();
cudaStatus = compute_kernel_matrix(n_features, number_of_data_1v1,
train_images_1v1, K, B);
clock_t time_finished_kernelmatrix = clock();
cout << "matrix time = "
<< double(time_finished_kernelmatrix - time_started_kernelmatrix) /
CLOCKS_PER_SEC
<< endl;
// Set meta parameters:
const uint32_t L = number_of_data_1v1;
// Define alpha, sigma:
int *iters = new int[1]();
float *alpha = new float[L](); // `()` initialises it to zeros.
float *sigma = new float[L]();
int *y = new int[L];
for (size_t i = 0; i < L; i++) y[i] = train_labels_1v1[i];
float *alpha_y = new float[L](); // alpha .* y
// Select initial working set:
uint32_t *all_data_point_idx = new uint32_t[L];
for (uint32_t i = 0; i < M; i++) all_data_point_idx[i] = i;
uint32_t *support_vector_idx = new uint32_t[M];
for (uint32_t i = 0; i < M; i++) support_vector_idx[i] = i;
uint32_t number_of_sv = M;
// Decomposition/Mini-batch algorithm:
uint32_t kkt_counter = 0;
// int number_of_single_violation = 0;
double total_minibatch_optimisation_time = 0;
uint32_t remaining_number_of_sv = 0;
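// Outer decomposition loop: optimise alpha over the current working set on the
// GPU, recompute the full gradient sigma = K * (alpha .* y), drop vectors whose
// alpha fell to zero, then append the points that now violate the KKT
// conditions (sorted by y*f(x), duplicates skipped). Stops when every point
// satisfies the KKT conditions within `accuracy`, or after 100 passes.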
while (!kkt_conditions_monitor(L, y, sigma, alpha, C) && kkt_counter < 100) {
kkt_counter++;
cout << "LOOPING: " << kkt_counter << " ";
// Select data for GPU (not prepare GPU data):
int *mini_y = new int[number_of_sv];
float *mini_a = new float[number_of_sv];
float *mini_s = new float[number_of_sv];
float *mini_K = new float[number_of_sv * number_of_sv];
for (size_t i = 0; i < number_of_sv; i++) {
mini_y[i] = y[support_vector_idx[i]];
mini_a[i] = alpha[support_vector_idx[i]];
mini_s[i] = sigma[support_vector_idx[i]];
for (size_t j = 0; j < number_of_sv; j++)
mini_K[i * number_of_sv + j] =
K[support_vector_idx[i] * L + support_vector_idx[j]];
}
// Call GPU kernel (including GPU data preparation):
clock_t kernel_start_time = clock();
if (number_of_sv > 1024) {
cudaStatus = kernel_minibatch_wrapper(iters, mini_a, mini_s, mini_K,
mini_y, number_of_sv, C);
} else {
cudaStatus = kernel_minibatch_block_wrapper(iters, mini_a, mini_s, mini_K,
mini_y, number_of_sv, C);
}
if (cudaStatus != hipSuccess) {
fprintf(stderr, "wrapper failed!");
return -1;
}
clock_t kernel_finish_time = clock();
total_minibatch_optimisation_time +=
double(kernel_finish_time - kernel_start_time) / CLOCKS_PER_SEC;
// Update gradient:
for (size_t i = 0; i < number_of_sv; i++)
alpha[support_vector_idx[i]] = mini_a[i];
for (size_t i = 0; i < L; i++) {
sigma[i] = 0;
for (size_t j = 0; j < L; j++) sigma[i] += alpha[j] * y[j] * K[i * L + j];
}
// Remove non-support vectors:
// To avoid an extra pass just to count the surviving vectors, allocate a
// generously sized buffer and track the used length separately.
uint32_t *remaining_support_vector_idx = new uint32_t[number_of_sv];
remaining_number_of_sv = 0;
for (size_t i = 0; i < number_of_sv; i++) {
if (mini_a[i] == 0) continue;
remaining_support_vector_idx[remaining_number_of_sv] =
support_vector_idx[i];
remaining_number_of_sv++;
}
delete[] support_vector_idx;
support_vector_idx = new uint32_t[remaining_number_of_sv];
memcpy(support_vector_idx, remaining_support_vector_idx,
remaining_number_of_sv * sizeof(uint32_t));
delete[] remaining_support_vector_idx;
remaining_support_vector_idx = NULL;
// Select new points that violate the KKT conditions:
float *violation_val_dirty = new float[L];
uint32_t *violation_idx_dirty = new uint32_t[L];
uint32_t number_of_violations = 0;
const float kkt1 = 1 - accuracy;
const float kkt2 = 1 + accuracy;
const float kkt3 = 0.01;
for (uint32_t i = 0; i < L; i++) {
float yfx = y[i] * sigma[i];
if ((alpha[i] == 0 && yfx < kkt1) ||
(alpha[i] == C && yfx > kkt2) ||
(0 < alpha[i] && alpha[i] < C && !(abs(yfx - 1) < kkt3))) {
violation_idx_dirty[number_of_violations] = i;
violation_val_dirty[number_of_violations] = yfx;
number_of_violations++;
}
}
cout << "number of new violation is " << number_of_violations << endl;
float *violation_val = new float[number_of_violations];
uint32_t *violation_idx = new uint32_t[number_of_violations];
memcpy(violation_val, violation_val_dirty,
number_of_violations * sizeof(float));
memcpy(violation_idx, violation_idx_dirty,
number_of_violations * sizeof(uint32_t));
delete[] violation_val_dirty;
violation_val_dirty = NULL;
delete[] violation_idx_dirty;
violation_idx_dirty = NULL;
// Sort the new points and discard duplicates (with respect to the remaining
// working set):
uint32_t *sort_perm = new uint32_t[number_of_violations];
for (uint32_t i = 0; i < number_of_violations; i++) sort_perm[i] = i;
sort(sort_perm, sort_perm + number_of_violations,
[violation_val](uint32_t a, uint32_t b) -> bool {
return violation_val[a] < violation_val[b];
});
uint32_t *violation_idx_sorted_unique = new uint32_t[number_of_violations];
uint32_t number_of_violations_unique = 0;
for (size_t i = 0; i < number_of_violations; i++) {
uint32_t this_support_vector_idx = violation_idx[sort_perm[i]];
if (any_of(support_vector_idx,
support_vector_idx + remaining_number_of_sv,
[this_support_vector_idx](uint32_t idx) {
return idx == this_support_vector_idx;
}))
continue;
violation_idx_sorted_unique[number_of_violations_unique] =
this_support_vector_idx;
number_of_violations_unique++;
}
delete[] sort_perm;
sort_perm = NULL;
delete[] violation_val;
violation_val = NULL;
delete[] violation_idx;
violation_idx = NULL;
// Concatenate remaining working set and violation set:
number_of_sv = remaining_number_of_sv + number_of_violations_unique;
uint32_t *new_support_vector_idx = new uint32_t[number_of_sv];
memcpy(new_support_vector_idx, support_vector_idx,
remaining_number_of_sv * sizeof(uint32_t));
memcpy(new_support_vector_idx + remaining_number_of_sv,
violation_idx_sorted_unique,
number_of_violations_unique * sizeof(uint32_t));
delete[] violation_idx_sorted_unique;
violation_idx_sorted_unique = NULL;
delete[] support_vector_idx;
support_vector_idx = new_support_vector_idx;
// Delete dynamic allocated memories:
delete[] mini_y;
mini_y = NULL;
delete[] mini_a;
mini_a = NULL;
delete[] mini_s;
mini_s = NULL;
delete[] mini_K;
mini_K = NULL;
}
delete[] support_vector_idx;
support_vector_idx = NULL;
cout << "Remaining Number of SV = " << remaining_number_of_sv << endl;
// Predict training set and get training error:
// clock_t training_error_time_start = clock();
uint32_t training_error_number = 0;
for (uint32_t i = 0; i < L; i++) {
int y_hat = (float(0) < sigma[i]) - (sigma[i] < float(0));
if (y_hat == y[i]) continue;
training_error_number++;
}
float training_error_rate = float(training_error_number) / L;
cout << "Precision = " << (1 - training_error_rate) * 100 << "%; ";
// clock_t training_error_time_finish = clock();
// double training_error_time = double(training_error_time_finish -
// training_error_time_start) / CLOCKS_PER_SEC; cout << "Precision time cost =
// " << training_error_time << endl;
// Record alpha, X and y:
all_L[pair_index] = L;
all_alpha[pair_index] = new float[L];
all_y[pair_index] = new int[L];
all_X[pair_index] = new uint8_t *[L];
for (size_t i = 0; i < L; i++) {
all_alpha[pair_index][i] = alpha[i];
all_y[pair_index][i] = y[i];
all_X[pair_index][i] = new uint8_t[n_features];
for (size_t j = 0; j < n_features; j++) {
all_X[pair_index][i][j] = train_images_1v1[i][j];
}
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return -1;
}
delete[] train_labels;
train_labels = NULL;
for (size_t i = 0; i < number_of_images; i++) {
delete[] train_images[i];
train_images[i] = NULL;
}
delete[] train_images;
train_images = NULL;
delete[] train_images_1v1;
train_images_1v1 = NULL;
delete[] train_labels_1v1;
train_labels_1v1 = NULL;
delete[] B;
B = NULL;
delete[] K;
K = NULL;
// delete[] K_ts; K_ts = NULL;
delete[] alpha;
alpha = NULL;
delete[] sigma;
sigma = NULL;
delete[] iters;
iters = NULL;
// No need to delete duplications: y.
clock_t total_finish_time = clock();
double total_time =
double(total_finish_time - total_start_time) / CLOCKS_PER_SEC;
cout << "Total svm time used = " << total_minibatch_optimisation_time << endl;
cout << "Total time used = " << total_time << endl;
return total_time;
}
bool kkt_conditions_monitor(uint32_t L, int *y, float *sigma, float *alpha,
const int &C) {
for (uint32_t i = 0; i < L; i++) {
float yfx = y[i] * sigma[i];
if (alpha[i] == 0 && yfx < 0.99) {
// printf("$1 the %u th alpha %.7f has yf(x) = %f\n", i, alpha[i], yfx);
return false;
} else if (alpha[i] == C && yfx > 1.01) {
// printf("$2 the %u th alpha %.7f has yf(x) = %f\n", i, alpha[i], yfx);
return false;
} else if (0 < alpha[i] && alpha[i] < C && !(abs(yfx - 1) < 0.01)) {
// printf("$3 the %u th alpha %.7f has yf(x) = %f\n", i, alpha[i], yfx);
return false; // NB: writing the chained expression 0 < alpha[i] < C would compile but not mean this, so the condition above uses two separate comparisons.
}
}
return true;
}
float dot_product_float(float vect_A[], float vect_B[], int n) {
float product = 0;
for (int i = 0; i < n; i++) product += vect_A[i] * vect_B[i];
return product;
}
| 7eedd38e02c75e10ccefd99ebcaf0bfeec658921.cu | /*
by Qin Yu, Apr 2019
*/
#include <algorithm> // sort, any_of.
#include <cassert> // assert.
#include <iostream> // cout, endl.
using namespace std;
#include "svm.h"
// Not used but may help:
// #include <stdio.h>
// #include <stdlib.h>
// #include <random>
// #include <ctime>
int main(int argc, char const *argv[]) {
cout << argc << endl;
for (size_t i = 0; i < argc; i++) {
cout << "argv[" << i << "] = " << argv[i] << endl;
}
if (argc != 4) {
cout << "Must have 1 integer argument for C and M, and float for ACCURACY" << endl;
return 1;
}
uint32_t C, M;
float ACCURACY;
sscanf(argv[1], "%d", &C);
sscanf(argv[2], "%d", &M);
sscanf(argv[3], "%f", &ACCURACY);
cout << "C = " << C << " and M = " << M << " with accuracy = " << ACCURACY << endl;
int class1_limit = 2;
int class2_limit = 2;
int number_of_SVMs = ((class1_limit - 1) * class2_limit) / 2;
cout << "Will train " << number_of_SVMs << " SVMs" << endl;
// Record trained SVMs:
float **all_alpha = new float *[45]();
uint8_t ***all_X = new uint8_t **[45]();
int **all_y = new int *[45]();
int *all_L = new int[45]();
double *time_all_pairs = new double[45]();
int pair_counter = 0;
for (size_t i = 0; i < class1_limit; i++) {
for (size_t j = 0; j < class2_limit; j++) {
if (i < j) {
cout << endl << "Starting training " << i << " vs " << j << endl;
time_all_pairs[pair_counter] = mnist_2_class_training(
i, j, pair_counter, C, M, ACCURACY, all_L, all_alpha, all_X, all_y);
if (time_all_pairs[pair_counter] < 0) {
cout << "Something Wrong when training SVM " << i << " vs " << j
<< endl;
return -1;
}
// cout << "Time spent on " << i << " vs " << j << " = " <<
// time_all_pairs[pair_counter] << endl;
pair_counter++;
}
}
}
cout << "---------------------------------------------" << endl;
pair_counter = 0;
double total_time = 0;
for (size_t i = 0; i < class1_limit; i++) {
for (size_t j = 0; j < class2_limit; j++) {
if (i < j) {
cout << "Time spent on " << i << " vs " << j << " = "
<< time_all_pairs[pair_counter] << endl;
total_time += time_all_pairs[pair_counter];
pair_counter++;
}
}
}
cout << "Total time spent on training all " << number_of_SVMs
<< "SVMs = " << total_time << endl;
// Load MNIST test data:
int mnist_loading_error = 0;
uint32_t magic_number_label_ts;
uint32_t number_of_labels_ts;
uint8_t *test_labels;
mnist_loading_error = read_MNIST_test_labels(
magic_number_label_ts, number_of_labels_ts, test_labels);
if (mnist_loading_error) return -1;
uint32_t magic_number_image_ts;
uint32_t number_of_images_ts, n_rows_ts, n_cols_ts, n_pixels_ts,
n_features_ts;
uint8_t **test_images;
mnist_loading_error = read_MNIST_test_images(
magic_number_image_ts, number_of_images_ts, n_rows_ts, n_cols_ts,
n_pixels_ts, n_features_ts, test_images);
if (mnist_loading_error) {
delete[] test_labels;
test_labels = NULL;
for (size_t i = 0; i < number_of_images_ts; i++) {
delete[] test_images[i];
test_images[i] = NULL;
}
delete[] test_images;
test_images = NULL;
return -1;
}
// Fraction Table:
float *B = new float[256 * 256];
for (int i = 0; i < 256; i++)
for (int j = 0; j < 256; j++)
B[i * 256 + j] = pow(i / 255.0f - j / 255.0f, 2);
// Compute Testing Kernel Matrix:
int **all_y_hat = new int *[45];
int L_ts = number_of_images_ts;
uint8_t **X_ts = test_images;
uint8_t *y_ts = test_labels;
cudaError_t cudaStatus;
int idx = 0;
for (size_t m = 0; m < class1_limit; m++) {
for (size_t n = 0; n < class2_limit; n++) {
if (m < n) {
int L = all_L[idx];
uint8_t **X = all_X[idx];
int *y = all_y[idx];
all_y_hat[idx] = new int[L_ts];
float *alpha = all_alpha[idx];
float *alpha_y = new float[L](); // alpha .* y
float *K_ts = new float[L_ts * L];
cudaStatus = compute_kernel_matrix_ts(785, L_ts, X_ts, L, X, K_ts, B);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "kernel_kernel_matrix launch failed: %s\n",
cudaGetErrorString(cudaStatus));
}
for (size_t i = 0; i < L; i++) alpha_y[i] = alpha[i] * y[i];
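// Decision function of one pairwise SVM: f(x) = sum_i alpha_i * y_i * K(x, x_i);
// a positive sign votes for digit m (the +1 class), otherwise for digit n.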
for (uint32_t i = 0; i < L_ts; i++) {
float temp_sigma = dot_product_float((K_ts + i * L), alpha_y, L);
int is_positive = (float(0) < temp_sigma) - (temp_sigma < float(0));
all_y_hat[idx][i] = (is_positive > 0) ? m : n;
}
delete[] alpha_y;
alpha_y = NULL;
delete[] K_ts;
K_ts = NULL;
idx++;
// cout << "no problem 1" << endl;
} // cout << "no problem 2" << endl;
} // cout << "no problem 3" << endl;
} // cout << "no problem 4" << endl;
cout << "L_ts = " << L_ts << endl;
int *final_pred_y = new int[L_ts];
int testing_error_number = 0;
for (size_t i = 0; i < L_ts; i++) {
int count_vote[10] = {0};
for (size_t j = 0; j < number_of_SVMs; j++) {
count_vote[all_y_hat[j][i]] += 1;
}
int max_vote_number = 0;
int max_vote_index = 0;
for (size_t k = 0; k < 10; k++) {
if (count_vote[k] > max_vote_number) {
max_vote_number = count_vote[k];
max_vote_index = k;
}
}
final_pred_y[i] = max_vote_index;
if (final_pred_y[i] != y_ts[i]) testing_error_number++;
}
float testing_error_rate = float(testing_error_number) / L_ts;
cout << "Final Testing Accuracy 10 Classes = " << (1 - testing_error_rate) * 100
<< "%" << endl;
for (size_t i = 0; i < number_of_SVMs; i++) {
delete[] all_y_hat[i];
all_y_hat[i] = NULL;
}
delete[] all_y_hat;
all_y_hat = NULL;
for (size_t i = 0; i < number_of_SVMs; i++) {
delete[] all_y[i];
all_y[i] = NULL;
}
delete[] all_y;
all_y = NULL;
for (size_t i = 0; i < number_of_SVMs; i++) {
delete[] all_alpha[i];
all_alpha[i] = NULL;
}
delete[] all_alpha;
all_alpha = NULL;
for (size_t i = 0; i < number_of_SVMs; i++) {
for (size_t j = 0; j < all_L[i]; j++) {
delete[] all_X[i][j];
all_X[i][j] = NULL;
}
delete[] all_X[i];
all_X[i] = NULL;
}
delete[] all_X;
all_X = NULL;
delete[] all_L;
all_L = NULL;
delete[] time_all_pairs;
time_all_pairs = NULL;
delete[] test_labels;
test_labels = NULL;
for (size_t i = 0; i < number_of_images_ts; i++) {
delete[] test_images[i];
test_images[i] = NULL;
}
delete[] test_images;
test_images = NULL;
delete[] B;
B = NULL;
return 0;
}
double mnist_2_class_training(int class1, int class2, int pair_index,
uint32_t C, uint32_t M, float accuracy,
int *&all_L, float **&all_alpha,
uint8_t ***&all_X, int **&all_y) {
clock_t total_start_time = clock();
assert(sizeof(float) == 4);
// Load MNIST:
clock_t total_loading_time = clock();
int mnist_loading_error = 0;
uint32_t magic_number_label;
uint32_t number_of_labels;
uint8_t *train_labels;
mnist_loading_error = read_MNIST_train_labels(magic_number_label,
number_of_labels, train_labels);
if (mnist_loading_error) return -1;
uint32_t magic_number_image;
uint32_t number_of_images, n_rows, n_cols, n_pixels, n_features;
uint8_t **train_images;
mnist_loading_error =
read_MNIST_train_images(magic_number_image, number_of_images, n_rows,
n_cols, n_pixels, n_features, train_images);
if (mnist_loading_error) {
delete[] train_labels;
train_labels = NULL;
for (size_t i = 0; i < number_of_images; i++) {
delete[] train_images[i];
train_images[i] = NULL;
}
delete[] train_images;
train_images = NULL;
return -1;
}
clock_t time_finished_loading = clock();
cout << "load time = "
<< double(time_finished_loading - total_start_time) / CLOCKS_PER_SEC
<< endl;
// Extract 1v1 data:
uint8_t class_label_pos = uint8_t(class1), class_label_neg = uint8_t(class2);
uint32_t number_of_labels_pos = 0, number_of_labels_neg = 0,
number_of_data_1v1 = 0;
uint8_t **train_images_1v1;
int8_t *train_labels_1v1;
extract_train_data_1v1(class_label_pos, number_of_labels_pos, class_label_neg,
number_of_labels_neg, number_of_labels,
number_of_images, number_of_data_1v1, train_labels,
train_labels_1v1, train_images, train_images_1v1,
n_features, n_cols);
clock_t time_finished_extracting = clock();
cout << "extract time = "
<< double(time_finished_extracting - time_finished_loading) /
CLOCKS_PER_SEC
<< endl;
// Fraction Table:
float *K = new float[number_of_data_1v1 *
number_of_data_1v1]; // Must be heap-allocated with new: the L x L kernel matrix is far too large for the stack.
float *B = new float[256 * 256];
for (int i = 0; i < 256; i++)
for (int j = 0; j < 256; j++)
B[i * 256 + j] = pow(i / 255.0f - j / 255.0f, 2);
// Compute Kernel Matrix:
cudaError_t cudaStatus;
clock_t time_started_kernelmatrix = clock();
cudaStatus = compute_kernel_matrix(n_features, number_of_data_1v1,
train_images_1v1, K, B);
clock_t time_finished_kernelmatrix = clock();
cout << "matrix time = "
<< double(time_finished_kernelmatrix - time_started_kernelmatrix) /
CLOCKS_PER_SEC
<< endl;
// Set meta parameters:
const uint32_t L = number_of_data_1v1;
// Define alpha, sigma:
int *iters = new int[1]();
float *alpha = new float[L](); // `()` initialises it to zeros.
float *sigma = new float[L]();
int *y = new int[L];
for (size_t i = 0; i < L; i++) y[i] = train_labels_1v1[i];
float *alpha_y = new float[L](); // alpha .* y
// Select initial working set:
uint32_t *all_data_point_idx = new uint32_t[L];
for (uint32_t i = 0; i < M; i++) all_data_point_idx[i] = i;
uint32_t *support_vector_idx = new uint32_t[M];
for (uint32_t i = 0; i < M; i++) support_vector_idx[i] = i;
uint32_t number_of_sv = M;
// Decomposition/Mini-batch algorithm:
uint32_t kkt_counter = 0;
// int number_of_single_violation = 0;
double total_minibatch_optimisation_time = 0;
uint32_t remaining_number_of_sv = 0;
while (!kkt_conditions_monitor(L, y, sigma, alpha, C) && kkt_counter < 100) {
kkt_counter++;
cout << "LOOPING: " << kkt_counter << " ";
// Select data for GPU (not prepare GPU data):
int *mini_y = new int[number_of_sv];
float *mini_a = new float[number_of_sv];
float *mini_s = new float[number_of_sv];
float *mini_K = new float[number_of_sv * number_of_sv];
for (size_t i = 0; i < number_of_sv; i++) {
mini_y[i] = y[support_vector_idx[i]];
mini_a[i] = alpha[support_vector_idx[i]];
mini_s[i] = sigma[support_vector_idx[i]];
for (size_t j = 0; j < number_of_sv; j++)
mini_K[i * number_of_sv + j] =
K[support_vector_idx[i] * L + support_vector_idx[j]];
}
// Call GPU kernel (including GPU data preparation):
clock_t kernel_start_time = clock();
if (number_of_sv > 1024) {
cudaStatus = kernel_minibatch_wrapper(iters, mini_a, mini_s, mini_K,
mini_y, number_of_sv, C);
} else {
cudaStatus = kernel_minibatch_block_wrapper(iters, mini_a, mini_s, mini_K,
mini_y, number_of_sv, C);
}
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "wrapper failed!");
return -1;
}
clock_t kernel_finish_time = clock();
total_minibatch_optimisation_time +=
double(kernel_finish_time - kernel_start_time) / CLOCKS_PER_SEC;
// Update gradient:
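// sigma_i = sum_j alpha_j * y_j * K(i, j), i.e. the current decision value
// f(x_i) for every training point, recomputed densely over the full set.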
for (size_t i = 0; i < number_of_sv; i++)
alpha[support_vector_idx[i]] = mini_a[i];
for (size_t i = 0; i < L; i++) {
sigma[i] = 0;
for (size_t j = 0; j < L; j++) sigma[i] += alpha[j] * y[j] * K[i * L + j];
}
// Remove non-support vectors:
// To avoid an extra pass just to count the surviving vectors, allocate a
// generously sized buffer and track the used length separately.
uint32_t *remaining_support_vector_idx = new uint32_t[number_of_sv];
remaining_number_of_sv = 0;
for (size_t i = 0; i < number_of_sv; i++) {
if (mini_a[i] == 0) continue;
remaining_support_vector_idx[remaining_number_of_sv] =
support_vector_idx[i];
remaining_number_of_sv++;
}
delete[] support_vector_idx;
support_vector_idx = new uint32_t[remaining_number_of_sv];
memcpy(support_vector_idx, remaining_support_vector_idx,
remaining_number_of_sv * sizeof(uint32_t));
delete[] remaining_support_vector_idx;
remaining_support_vector_idx = NULL;
// Select new points that violate the KKT conditions:
float *violation_val_dirty = new float[L];
uint32_t *violation_idx_dirty = new uint32_t[L];
uint32_t number_of_violations = 0;
const float kkt1 = 1 - accuracy;
const float kkt2 = 1 + accuracy;
const float kkt3 = 0.01;
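// KKT violation tests (with tolerance `accuracy`): alpha = 0 requires
// y*f(x) >= 1, alpha = C requires y*f(x) <= 1, and 0 < alpha < C requires
// y*f(x) == 1 (within kkt3); any point failing its test rejoins the working set.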
for (uint32_t i = 0; i < L; i++) {
float yfx = y[i] * sigma[i];
if ((alpha[i] == 0 && yfx < kkt1) ||
(alpha[i] == C && yfx > kkt2) ||
(0 < alpha[i] && alpha[i] < C && !(abs(yfx - 1) < kkt3))) {
violation_idx_dirty[number_of_violations] = i;
violation_val_dirty[number_of_violations] = yfx;
number_of_violations++;
}
}
cout << "number of new violation is " << number_of_violations << endl;
float *violation_val = new float[number_of_violations];
uint32_t *violation_idx = new uint32_t[number_of_violations];
memcpy(violation_val, violation_val_dirty,
number_of_violations * sizeof(float));
memcpy(violation_idx, violation_idx_dirty,
number_of_violations * sizeof(uint32_t));
delete[] violation_val_dirty;
violation_val_dirty = NULL;
delete[] violation_idx_dirty;
violation_idx_dirty = NULL;
// Sort the new points and discard duplicates (with respect to the remaining
// working set):
uint32_t *sort_perm = new uint32_t[number_of_violations];
for (uint32_t i = 0; i < number_of_violations; i++) sort_perm[i] = i;
sort(sort_perm, sort_perm + number_of_violations,
[violation_val](uint32_t a, uint32_t b) -> bool {
return violation_val[a] < violation_val[b];
});
uint32_t *violation_idx_sorted_unique = new uint32_t[number_of_violations];
uint32_t number_of_violations_unique = 0;
for (size_t i = 0; i < number_of_violations; i++) {
uint32_t this_support_vector_idx = violation_idx[sort_perm[i]];
if (any_of(support_vector_idx,
support_vector_idx + remaining_number_of_sv,
[this_support_vector_idx](uint32_t idx) {
return idx == this_support_vector_idx;
}))
continue;
violation_idx_sorted_unique[number_of_violations_unique] =
this_support_vector_idx;
number_of_violations_unique++;
}
delete[] sort_perm;
sort_perm = NULL;
delete[] violation_val;
violation_val = NULL;
delete[] violation_idx;
violation_idx = NULL;
// Concatenate remaining working set and violation set:
number_of_sv = remaining_number_of_sv + number_of_violations_unique;
uint32_t *new_support_vector_idx = new uint32_t[number_of_sv];
memcpy(new_support_vector_idx, support_vector_idx,
remaining_number_of_sv * sizeof(uint32_t));
memcpy(new_support_vector_idx + remaining_number_of_sv,
violation_idx_sorted_unique,
number_of_violations_unique * sizeof(uint32_t));
delete[] violation_idx_sorted_unique;
violation_idx_sorted_unique = NULL;
delete[] support_vector_idx;
support_vector_idx = new_support_vector_idx;
// Delete dynamic allocated memories:
delete[] mini_y;
mini_y = NULL;
delete[] mini_a;
mini_a = NULL;
delete[] mini_s;
mini_s = NULL;
delete[] mini_K;
mini_K = NULL;
}
delete[] support_vector_idx;
support_vector_idx = NULL;
cout << "Remaining Number of SV = " << remaining_number_of_sv << endl;
// Predict training set and get training error:
// clock_t training_error_time_start = clock();
uint32_t training_error_number = 0;
for (uint32_t i = 0; i < L; i++) {
int y_hat = (float(0) < sigma[i]) - (sigma[i] < float(0));
if (y_hat == y[i]) continue;
training_error_number++;
}
float training_error_rate = float(training_error_number) / L;
cout << "Precision = " << (1 - training_error_rate) * 100 << "%; ";
// clock_t training_error_time_finish = clock();
// double training_error_time = double(training_error_time_finish -
// training_error_time_start) / CLOCKS_PER_SEC; cout << "Precision time cost =
// " << training_error_time << endl;
// Record alpha, X and y:
all_L[pair_index] = L;
all_alpha[pair_index] = new float[L];
all_y[pair_index] = new int[L];
all_X[pair_index] = new uint8_t *[L];
for (size_t i = 0; i < L; i++) {
all_alpha[pair_index][i] = alpha[i];
all_y[pair_index][i] = y[i];
all_X[pair_index][i] = new uint8_t[n_features];
for (size_t j = 0; j < n_features; j++) {
all_X[pair_index][i][j] = train_images_1v1[i][j];
}
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return -1;
}
delete[] train_labels;
train_labels = NULL;
for (size_t i = 0; i < number_of_images; i++) {
delete[] train_images[i];
train_images[i] = NULL;
}
delete[] train_images;
train_images = NULL;
delete[] train_images_1v1;
train_images_1v1 = NULL;
delete[] train_labels_1v1;
train_labels_1v1 = NULL;
delete[] B;
B = NULL;
delete[] K;
K = NULL;
// delete[] K_ts; K_ts = NULL;
delete[] alpha;
alpha = NULL;
delete[] sigma;
sigma = NULL;
delete[] iters;
iters = NULL;
// No need to delete duplications: y.
clock_t total_finish_time = clock();
double total_time =
double(total_finish_time - total_start_time) / CLOCKS_PER_SEC;
cout << "Total svm time used = " << total_minibatch_optimisation_time << endl;
cout << "Total time used = " << total_time << endl;
return total_time;
}
bool kkt_conditions_monitor(uint32_t L, int *y, float *sigma, float *alpha,
const int &C) {
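// Returns true only when every point satisfies the (hard-coded, ~1% tolerance)
// KKT optimality conditions of the C-SVM dual: alpha = 0 => y*f(x) >= 1,
// alpha = C => y*f(x) <= 1, 0 < alpha < C => y*f(x) == 1.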
for (uint32_t i = 0; i < L; i++) {
float yfx = y[i] * sigma[i];
if (alpha[i] == 0 && yfx < 0.99) {
// printf("$1 the %u th alpha %.7f has yf(x) = %f\n", i, alpha[i], yfx);
return false;
} else if (alpha[i] == C && yfx > 1.01) {
// printf("$2 the %u th alpha %.7f has yf(x) = %f\n", i, alpha[i], yfx);
return false;
} else if (0 < alpha[i] && alpha[i] < C && !(abs(yfx - 1) < 0.01)) {
// printf("$3 the %u th alpha %.7f has yf(x) = %f\n", i, alpha[i], yfx);
return false; // NB: writing the chained expression 0 < alpha[i] < C would compile but not mean this, so the condition above uses two separate comparisons.
}
}
return true;
}
float dot_product_float(float vect_A[], float vect_B[], int n) {
float product = 0;
for (int i = 0; i < n; i++) product += vect_A[i] * vect_B[i];
return product;
}
|
05a0b3060c9017ec29936349d72cd3aa7e1219b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorTopK.hip"
#else
#include <c10/macros/Macros.h>
#include <c10/hip/HIPException.h>
void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input_,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input_ != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_));
dim = at::maybe_wrap_dim(dim, input_);
THArgCheck(THCTensor_(nDimension)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, indices) <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim);
THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension");
THCTensor *input = THCTensor_(newContiguous)(state, input_);
// Build the output size, which is the dim being selected set to
// size k
std::vector<int64_t> topKSize = input->sizes().vec();
if (topKSize.size() > 0) {
topKSize[dim] = k;
}
THCTensor_(resize)(state, topK, topKSize, {});
THCudaLongTensor_resize(state, indices, topKSize, {});
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
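// Macro cascade: RUN_T picks the index type, RUN_DIM the collapsed-dimension
// specialisation, RUN_DIR the selection direction (largest/smallest values),
// and RUN_K finally launches gatherTopK with those template parameters.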
#define RUN_K(INDEX_T, DIM, DIR) \
hipLaunchKernelGGL(( gatherTopK<scalar_t, INDEX_T, DIM, DIR>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
static_cast<INDEX_T>(inputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKSlices), \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \
C10_HIP_KERNEL_LAUNCH_CHECK()
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<scalar_t, INDEX_T> inputInfo = \
getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, input); \
TensorInfo<scalar_t, INDEX_T> topKInfo = \
getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(::min(THCRoundUp(sliceSize, (int64_t) C10_WARP_SIZE), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
// the below is safe with 0-dimensional tensors because it is based on
// THCTensorInfo which implicitly expands to 1-dimensional.
if (THCTensor_nElement(state, input) > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (THCTensor_canUse32BitIndexMath(state, input) &&
THCTensor_canUse32BitIndexMath(state, topK) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted && THCTensor_(numel)(state, topK) > 1) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
// Workaround:
// CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace,
// and so for the double word types,
// we get "too many resources requested for launch" in the 2048 case
#if TORCH_HIP_VERSION >= 8000
#if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG)
int maxSliceSize = 1024;
#else
int maxSliceSize = 2048;
#endif
#else
int maxSliceSize = 2048;
#endif
if (sliceSize <= maxSliceSize) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaLongTensor_free(state, input);
THCudaCheck(hipGetLastError());
}
#endif // THC_GENERIC_FILE
| 05a0b3060c9017ec29936349d72cd3aa7e1219b2.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorTopK.cu"
#else
#include <c10/macros/Macros.h>
#include <c10/cuda/CUDAException.h>
void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input_,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input_ != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_));
dim = at::maybe_wrap_dim(dim, input_);
THArgCheck(THCTensor_(nDimension)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, indices) <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim);
THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension");
THCTensor *input = THCTensor_(newContiguous)(state, input_);
// Build the output size, which is the dim being selected set to
// size k
std::vector<int64_t> topKSize = input->sizes().vec();
if (topKSize.size() > 0) {
topKSize[dim] = k;
}
THCTensor_(resize)(state, topK, topKSize, {});
THCudaLongTensor_resize(state, indices, topKSize, {});
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
#define RUN_K(INDEX_T, DIM, DIR) \
gatherTopK<scalar_t, INDEX_T, DIM, DIR> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
static_cast<INDEX_T>(inputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKSlices), \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \
C10_CUDA_KERNEL_LAUNCH_CHECK()
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<scalar_t, INDEX_T> inputInfo = \
getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, input); \
TensorInfo<scalar_t, INDEX_T> topKInfo = \
getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(std::min(THCRoundUp(sliceSize, (int64_t) C10_WARP_SIZE), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
// the below is safe with 0-dimensional tensors because it is based on
// THCTensorInfo which implicitly expands to 1-dimensional.
if (THCTensor_nElement(state, input) > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (THCTensor_canUse32BitIndexMath(state, input) &&
THCTensor_canUse32BitIndexMath(state, topK) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted && THCTensor_(numel)(state, topK) > 1) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
// Workaround:
// CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace,
// and so for the double word types,
// we get "too many resources requested for launch" in the 2048 case
#if CUDA_VERSION >= 8000
#if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG)
int maxSliceSize = 1024;
#else
int maxSliceSize = 2048;
#endif
#else
int maxSliceSize = 2048;
#endif
if (sliceSize <= maxSliceSize) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaLongTensor_free(state, input);
THCudaCheck(cudaGetLastError());
}
#endif // THC_GENERIC_FILE
|
250075d65ce63fe70abd66c37a189d0fcfed8d28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudafuncs.hpp"
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if(err!=hipSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
__global__ void bgr_to_gray_kernel( unsigned char* input,
unsigned char* output,
int width,
int height,
int colorWidthStep,
int grayWidthStep)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex<width) && (yIndex<height))
{
//Location of colored pixel in input
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Location of gray pixel in output
const int gray_tid = yIndex * grayWidthStep + xIndex;
const unsigned char blue = input[color_tid];
const unsigned char green = input[color_tid + 1];
const unsigned char red = input[color_tid + 2];
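// Weighted sum approximating BT.601 luma (0.299 R + 0.587 G + 0.114 B).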
const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;
output[gray_tid] = static_cast<unsigned char>(gray);
}
}
void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int colorBytes = input.step * input.rows;
const int grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
//Allocate device memory
SAFE_CALL(hipMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
SAFE_CALL(hipMemcpy(d_input,input.ptr(),colorBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(16,16);
//Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
//Launch the color conversion kernel
hipLaunchKernelGGL(( bgr_to_gray_kernel), dim3(grid),dim3(block), 0, 0, d_input,d_output,input.cols,input.rows,input.step,output.step);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
//Copy back data from destination device meory to OpenCV output image
SAFE_CALL(hipMemcpy(output.ptr(),d_output,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed");
//Free the device memory
SAFE_CALL(hipFree(d_input),"CUDA Free Failed");
SAFE_CALL(hipFree(d_output),"CUDA Free Failed");
}
| 250075d65ce63fe70abd66c37a189d0fcfed8d28.cu | #include "cudafuncs.hpp"
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if(err!=cudaSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
__global__ void bgr_to_gray_kernel( unsigned char* input,
unsigned char* output,
int width,
int height,
int colorWidthStep,
int grayWidthStep)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex<width) && (yIndex<height))
{
//Location of colored pixel in input
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Location of gray pixel in output
const int gray_tid = yIndex * grayWidthStep + xIndex;
const unsigned char blue = input[color_tid];
const unsigned char green = input[color_tid + 1];
const unsigned char red = input[color_tid + 2];
const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;
output[gray_tid] = static_cast<unsigned char>(gray);
}
}
void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int colorBytes = input.step * input.rows;
const int grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
//Allocate device memory
SAFE_CALL(cudaMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
SAFE_CALL(cudaMemcpy(d_input,input.ptr(),colorBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(16,16);
//Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
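//(cols + block.x - 1)/block.x is integer ceil-division, so partial edge tiles
//still get a block; the kernel's bounds check discards the surplus threads.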
//Launch the color conversion kernel
bgr_to_gray_kernel<<<grid,block>>>(d_input,d_output,input.cols,input.rows,input.step,output.step);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
//Copy back data from device memory to the OpenCV output image
SAFE_CALL(cudaMemcpy(output.ptr(),d_output,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
//Free the device memory
SAFE_CALL(cudaFree(d_input),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_output),"CUDA Free Failed");
}
|
21e6f8a1cc621870adcd86dcb0e83eadbb60e4ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "matrix.hh"
#include "nn_exception.hh"
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data(nullptr),
device_allocated(false), host_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
//void Matrix::allocateCudaMemory() {
// if (!device_allocated) {
// float* device_memory = nullptr;
// hipMalloc(&device_memory, shape.x * shape.y * sizeof(float));
// NNException::throwIfDeviceErrorsOccurred("Cannot allocate CUDA memory for Tensor3D.");
// data_device = std::shared_ptr<float>(device_memory,
// [&](float* ptr){ hipFree(ptr); });
// device_allocated = true;
// }
//}
//
//void Matrix::allocateHostMemory() {
// if (!host_allocated) {
// data_host = std::shared_ptr<float>(new float[shape.x * shape.y],
// [&](float* ptr){ delete[] ptr; });
// host_allocated = true;
// }
//}
void Matrix::allocateMemory() {
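// Single unified (managed) allocation visible from both host and device; the
// shared_ptr's custom deleter releases it with hipFree when the last reference goes away.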
float* mem;
hipMallocManaged(&mem, shape.x*shape.y*sizeof(float));
data = std::shared_ptr<float>(mem,
[&](float* ptr){ hipFree(ptr); });
device_allocated = true;
host_allocated = true;
//allocateCudaMemory();
//allocateHostMemory();
}
void Matrix::allocateMemoryIfNotAllocated(Shape shape) {
if (!device_allocated && !host_allocated) {
this->shape = shape;
allocateMemory();
}
}
float& Matrix::operator[](const int index) {
return data.get()[index];
}
const float& Matrix::operator[](const int index) const {
return data.get()[index];
}
| 21e6f8a1cc621870adcd86dcb0e83eadbb60e4ff.cu | #include "matrix.hh"
#include "nn_exception.hh"
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data(nullptr),
device_allocated(false), host_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
//void Matrix::allocateCudaMemory() {
// if (!device_allocated) {
// float* device_memory = nullptr;
// cudaMalloc(&device_memory, shape.x * shape.y * sizeof(float));
// NNException::throwIfDeviceErrorsOccurred("Cannot allocate CUDA memory for Tensor3D.");
// data_device = std::shared_ptr<float>(device_memory,
// [&](float* ptr){ cudaFree(ptr); });
// device_allocated = true;
// }
//}
//
//void Matrix::allocateHostMemory() {
// if (!host_allocated) {
// data_host = std::shared_ptr<float>(new float[shape.x * shape.y],
// [&](float* ptr){ delete[] ptr; });
// host_allocated = true;
// }
//}
void Matrix::allocateMemory() {
float* mem;
cudaMallocManaged(&mem, shape.x*shape.y*sizeof(float));
data = std::shared_ptr<float>(mem,
[&](float* ptr){ cudaFree(ptr); });
device_allocated = true;
host_allocated = true;
//allocateCudaMemory();
//allocateHostMemory();
}
void Matrix::allocateMemoryIfNotAllocated(Shape shape) {
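// Lazy allocation: the first call fixes the matrix shape; later calls with a
// different shape are silently ignored, so callers must size the matrix up front.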
if (!device_allocated && !host_allocated) {
this->shape = shape;
allocateMemory();
}
}
float& Matrix::operator[](const int index) {
return data.get()[index];
}
const float& Matrix::operator[](const int index) const {
return data.get()[index];
}
|
2c6f9f777b84d3f0abe67000c1b2122ab67a1eae.hip | // !!! This is a file automatically generated by hipify!!!
// tests stream callbacks via hipStreamAddCallback
#include <iostream>
#include <memory>
#include <unistd.h>
using namespace std;
#include <hip/hip_runtime.h>
__global__ void longKernel(float *data, int N, float value) {
for(int i = 0; i < N; i++) {
data[i] += value;
}
}
void myCallback(hipStream_t stream, hipError_t status, void *data) { // parameter types must match hipStreamCallback_t
char *message = (char *)data;
cout << "message " << message << endl;
}
int main(int argc, char *argv[]) {
int N = 52400; // * 1024;
// float *hostfloats = new float[N];
float *gpufloats;
hipMalloc((void **)&gpufloats, N * sizeof(float));
hipStream_t stream;
hipStreamCreateWithFlags(&stream, 0); // hipStreamCreate__ emitted by hipify is not a real HIP API; hipStreamCreateWithFlags mirrors cuStreamCreate(stream, flags)
hipLaunchKernelGGL(( longKernel), dim3(dim3(102400 / 32, 1, 1)), dim3(dim3(32, 1, 1)), 0, stream, gpufloats, N, 3.0f);
cout << "queued kernel x" << endl;
const char *message = "hello";
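// The callback is enqueued behind the kernel on the same stream, so it only
// fires once longKernel has finished.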
hipStreamAddCallback(stream, myCallback, (void *)message, 0);
cout << "added callback" << endl;
sleep(1);
cout << "synchronizing..." << endl;
hipStreamSynchronize(stream);
cout << "... synchronized" << endl;
hipStreamDestroy(stream);
hipFree(gpufloats);
cout << "finished" << endl;
return 0;
}
| 2c6f9f777b84d3f0abe67000c1b2122ab67a1eae.cu | // tests cuEventCreate
#include <iostream>
#include <memory>
#include <unistd.h>
using namespace std;
#include <cuda.h>
__global__ void longKernel(float *data, int N, float value) {
for(int i = 0; i < N; i++) {
data[i] += value;
}
}
void myCallback(cudaStream_t stream, cudaError_t status, void *data) { // parameter types must match cudaStreamCallback_t
char *message = (char *)data;
cout << "message " << message << endl;
}
int main(int argc, char *argv[]) {
int N = 52400; // * 1024;
// float *hostfloats = new float[N];
float *gpufloats;
cudaMalloc((void **)&gpufloats, N * sizeof(float));
CUstream stream;
cuStreamCreate(&stream, 0);
longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>(gpufloats, N, 3.0f);
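// 3200 blocks of 32 threads; every thread walks the whole array, so the kernel
// stays busy long enough for the callback ordering to be observable.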
cout << "queued kernel x" << endl;
const char *message = "hello";
cudaStreamAddCallback(stream, myCallback, (void *)message, 0);
cout << "added callback" << endl;
sleep(1);
cout << "synchronizing..." << endl;
cuStreamSynchronize(stream);
cout << "... synchronized" << endl;
cuStreamDestroy(stream);
cudaFree(gpufloats);
cout << "finished" << endl;
return 0;
}
|
192f0612d093495c8025d61132cebf1e43ec2859.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file intermediatePressure.cu
* \author Christopher Minar ([email protected])
* \brief kernels to generate the right-hand side of the Poisson equation
*/
#include "intermediatePressure.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
__global__
void intermediatePressure_luo(double *rhs2, double *uhat, double *ym, double *yp, double *xm, double *xp, double *dx, double *dy, int nx, int ny)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= nx*ny)
return;
int ip = threadIdx.x + blockDim.x * blockIdx.x,
I = ip % nx,
J = ip / nx,
iu = (nx-1)*J + I,
iv = (nx-1)*ny + nx*J +I;
double temp = 0;
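// Accumulate the (negated) discrete divergence of the intermediate velocity,
// d(uhat)/dx + d(vhat)/dy, for this pressure cell; at domain edges the stored
// boundary values xm/xp/ym/yp stand in for the missing uhat entries.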
//EAST
//if not on the east wall, add east term
if (I != nx-1)//not at east boundary
temp -= uhat[iu]/dx[I];
else if (I == nx-1)//at east boundary
temp -= xp[J]/dx[I];
//WEST
//if not on west wall, add west term
if (I != 0)//not at west boundary
temp += uhat[iu - 1]/dx[I];
else if (I == 0)//at the west boundary
temp += xm[J]/dx[I];
//NORTH
//if not on north wall, add north term
if (J != ny-1)//not at north boundary
temp -= uhat[iv]/dy[J];
else if (J == ny-1)//at north boundary
temp -= yp[(nx-1)+I]/dy[J];
//SOUTH
//if not on south wall, add south term
if (J != 0)//not at south boundary
temp += uhat[iv-nx]/dy[J];
else if (J == 0)//at south boundary
temp += ym[(nx-1)+I]/dy[J];
rhs2[ip] = temp;
}
}
| 192f0612d093495c8025d61132cebf1e43ec2859.cu | /***************************************************************************//**
* \file intermediatePressure.cu
* \author Christopher Minar ([email protected])
* \brief kernels to generate the right-hand side of the Poisson equation
*/
#include "intermediatePressure.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
__global__
void intermediatePressure_luo(double *rhs2, double *uhat, double *ym, double *yp, double *xm, double *xp, double *dx, double *dy, int nx, int ny)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= nx*ny)
return;
int ip = threadIdx.x + blockDim.x * blockIdx.x,
I = ip % nx,
J = ip / nx,
iu = (nx-1)*J + I,
iv = (nx-1)*ny + nx*J +I;
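// Flattened indexing: iu addresses the u-velocity on this cell's east face
// ((nx-1) u-unknowns per row); iv skips past all the u-unknowns and addresses
// the v-velocity on the cell's north face.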
double temp = 0;
//EAST
//if not on the east wall, add east term
if (I != nx-1)//not at east boundary
temp -= uhat[iu]/dx[I];
else if (I == nx-1)//at east boundary
temp -= xp[J]/dx[I];
//WEST
//if not on west wall, add west term
if (I != 0)//not at west boundary
temp += uhat[iu - 1]/dx[I];
else if (I == 0)//at the west boundary
temp += xm[J]/dx[I];
//NORTH
//if not on north wall, add north term
if (J != ny-1)//not at north boundary
temp -= uhat[iv]/dy[J];
else if (J == ny-1)//at north boundary
temp -= yp[(nx-1)+I]/dy[J];
//SOUTH
//if not on south wall, add south term
if (J != 0)//not at south boundary
temp += uhat[iv-nx]/dy[J];
else if (J == 0)//at south boundary
temp += ym[(nx-1)+I]/dy[J];
rhs2[ip] = temp;
}
}
|
4f8f29c13b082da507a404f04261e66d5654602c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "matrixTools.c"
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define TOL 1e-6
#define limit_iter 10000
#define BLOCK_SIZE 50
__device__ int flag;
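// One Jacobi sweep: x_new[i] = (b[i] - sum_{j != i} A[i][j] * x[j]) / A[i][i].
// The current iterate x_iter is staged through shared memory in tiles of
// BLOCK_SIZE values to cut redundant global-memory reads.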
__global__ void jacobiOnDevice(int n, float *A, float *b, float *x_iter, float *x_iter_new)
{
__shared__ float x_iter_sh[BLOCK_SIZE];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
{
float sigma = 0;
for (int k=0; k < (BLOCK_SIZE+n-1)/BLOCK_SIZE; k++)
{
x_iter_sh[threadIdx.x] = x_iter[k*BLOCK_SIZE+threadIdx.x];
__syncthreads();
for (int kk=0; kk<BLOCK_SIZE; kk++)
{
if (i != (kk+BLOCK_SIZE*k)) sigma += (A[i*n+(kk+BLOCK_SIZE*k)]*x_iter_sh[kk]);
}
__syncthreads();
}
x_iter_new[i] = (b[i]-sigma)/A[i*n+i];
}
}
__global__ void checkConvergence(int n, float *x_iter, float *x_iter_new)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<n)
{
if (fabs(x_iter[i]-x_iter_new[i]) > TOL) flag = 0;
x_iter[i] = x_iter_new[i];
}
}
int main(int argc, char **argv)
{
int sizeCounter = atoi(argv[1]);
int tests = atoi(argv[2]);
for (int counter=3; counter < 3+sizeCounter; counter++)
{
for (int testCounter=0; testCounter < tests; testCounter++)
{
hipEvent_t start, stop;
float *A, *b, *x, *x_iter, *x_iter_new;
int n = atoi(argv[counter]);
hipMallocManaged(&A, n*n*sizeof(float));
hipMallocManaged(&b, n*sizeof(float));
hipMallocManaged(&x, n*sizeof(float));
hipMallocManaged(&x_iter, n*sizeof(float));
hipMallocManaged(&x_iter_new, n*sizeof(float));
char file_A[80], file_b[80], file_x[80];
sprintf(file_A, "test_input/matrix_A_%dx%d.txt", n, n);
sprintf(file_b, "test_input/vector_b_%dx1.txt", n);
sprintf(file_x, "test_input/vector_x_%dx1.txt", n);
read_matrix(n, A, file_A, " ");
read_vector(n, b, file_b, " ");
read_vector(n, x, file_x, " ");
for (int i=0; i<n; i++) x_iter[i] = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int k = 0, isConverged;
int gridSize = (int)ceil(1.0*n/BLOCK_SIZE);
do {
isConverged = 1;
hipMemcpyToSymbol(flag, &isConverged, sizeof(int));
hipLaunchKernelGGL(( jacobiOnDevice), dim3(gridSize), dim3(BLOCK_SIZE), 0, 0, n, A, b, x_iter, x_iter_new);
hipLaunchKernelGGL(( checkConvergence), dim3(gridSize), dim3(BLOCK_SIZE), 0, 0, n, x_iter, x_iter_new);
hipMemcpyFromSymbol(&isConverged, flag, sizeof(int));
hipDeviceSynchronize();
k++;
} while (k < limit_iter && !isConverged);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop);
hipDeviceSynchronize();
// hipError_t code=hipGetLastError();
// printf("%s\n", hipGetErrorString(code));
printf("%d ", n);
printf("%.6f ", milliseconds*1e-3);
printf("%.9f ", norm_vector(n, x_iter, x));
printf("%d\n", k);
hipFree(A); hipFree(b); hipFree(x); hipFree(x_iter); hipFree(x_iter_new);
}
}
return 0;
}
| 4f8f29c13b082da507a404f04261e66d5654602c.cu | #include <stdio.h>
#include <stdlib.h>
#include "matrixTools.c"
#include <sys/time.h>
#include <cuda.h>
#define TOL 1e-6
#define limit_iter 10000
#define BLOCK_SIZE 50
__device__ int flag;
__global__ void jacobiOnDevice(int n, float *A, float *b, float *x_iter, float *x_iter_new)
{
__shared__ float x_iter_sh[BLOCK_SIZE];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
{
float sigma = 0;
for (int k=0; k < (BLOCK_SIZE+n-1)/BLOCK_SIZE; k++)
{
x_iter_sh[threadIdx.x] = x_iter[k*BLOCK_SIZE+threadIdx.x];
__syncthreads();
for (int kk=0; kk<BLOCK_SIZE; kk++)
{
if (i != (kk+BLOCK_SIZE*k)) sigma += (A[i*n+(kk+BLOCK_SIZE*k)]*x_iter_sh[kk]);
}
__syncthreads();
}
x_iter_new[i] = (b[i]-sigma)/A[i*n+i];
}
}
__global__ void checkConvergence(int n, float *x_iter, float *x_iter_new)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<n)
{
if (fabs(x_iter[i]-x_iter_new[i]) > TOL) flag = 0;
x_iter[i] = x_iter_new[i];
}
}
int main(int argc, char **argv)
{
int sizeCounter = atoi(argv[1]);
int tests = atoi(argv[2]);
for (int counter=3; counter < 3+sizeCounter; counter++)
{
for (int testCounter=0; testCounter < tests; testCounter++)
{
cudaEvent_t start, stop;
float *A, *b, *x, *x_iter, *x_iter_new;
int n = atoi(argv[counter]);
cudaMallocManaged(&A, n*n*sizeof(float));
cudaMallocManaged(&b, n*sizeof(float));
cudaMallocManaged(&x, n*sizeof(float));
cudaMallocManaged(&x_iter, n*sizeof(float));
cudaMallocManaged(&x_iter_new, n*sizeof(float));
char file_A[80], file_b[80], file_x[80];
sprintf(file_A, "test_input/matrix_A_%dx%d.txt", n, n);
sprintf(file_b, "test_input/vector_b_%dx1.txt", n);
sprintf(file_x, "test_input/vector_x_%dx1.txt", n);
read_matrix(n, A, file_A, " ");
read_vector(n, b, file_b, " ");
read_vector(n, x, file_x, " ");
for (int i=0; i<n; i++) x_iter[i] = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int k = 0, isConverged;
int gridSize = (int)ceil(1.0*n/BLOCK_SIZE);
do {
isConverged = 1;
cudaMemcpyToSymbol(flag, &isConverged, sizeof(int));
jacobiOnDevice<<<gridSize, BLOCK_SIZE>>>(n, A, b, x_iter, x_iter_new);
checkConvergence<<<gridSize, BLOCK_SIZE>>>(n, x_iter, x_iter_new);
cudaMemcpyFromSymbol(&isConverged, flag, sizeof(int));
cudaDeviceSynchronize();
k++;
} while (k < limit_iter && !isConverged);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop);
cudaDeviceSynchronize();
// cudaError_t code=cudaGetLastError();
// printf("%s\n", cudaGetErrorString(code));
printf("%d ", n);
printf("%.6f ", milliseconds*1e-3);
printf("%.9f ", norm_vector(n, x_iter, x));
printf("%d\n", k);
cudaFree(A); cudaFree(b); cudaFree(x); cudaFree(x_iter); cudaFree(x_iter_new);
}
}
return 0;
}
|
e32a883c102d6aede3aaa8ff3e19b3980ae0be17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/LaunchUtils.h>
#include <ATen/hip/HIPGraphsUtils.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
namespace at { namespace native {
namespace {
template <typename T>
inline __device__ bool _isinf(T x) { return ::isinf(x); }
inline __device__ bool _isinf(c10::Half x) {
return ::isinf(static_cast<float>(x));
}
inline __device__ bool _isinf(c10::BFloat16 x) {
return ::isinf(static_cast<float>(x));
}
#define MAX_NUM_BLOCKS 64
// Normalizes the L1 norm of every row to 1; used by multinomial
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void renormRowsL1(scalar_t* dist, long rows, long cols) {
extern __shared__ unsigned char my_smem[];
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
scalar_t zero = static_cast<scalar_t>(0);
scalar_t val;
for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) {
scalar_t sum = static_cast<scalar_t>(0);
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
val = dist[row * cols + col];
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
sum = sum + val;
}
sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
smem[0] = sum;
}
__syncthreads();
sum = smem[0];
if (sum > zero) {
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
dist[row * cols + col] = dist[row * cols + col] / sum;
}
}
}
}
void renormRows(Tensor& t) {
TORCH_CHECK(t.dim() == 2);
int64_t rows = t.size(0);
int64_t cols = t.size(1);
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
const int64_t maxThreads = ::min(
props->maxThreadsPerBlock, cuda_utils::kCUDABlockReduceMaxThreads);
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(::min(maxThreads, C10_WARP_SIZE * ceil_div(cols, int64_t{C10_WARP_SIZE})));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "renormRows_cuda", [&] {
hipLaunchKernelGGL(( renormRowsL1<scalar_t>)
, dim3(grid), dim3(block), (block.x / C10_WARP_SIZE) * sizeof(scalar_t),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), t.data_ptr<scalar_t>(),
rows, cols);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
template <typename scalar_t>
__device__ int binarySearchForMultinomial(scalar_t* cumdist,
scalar_t* dist,
int size,
scalar_t val) {
int start = 0;
int end = size;
// cumdist[size - 1] = 0 => all zero prob dist
CUDA_KERNEL_ASSERT(cumdist[size - 1] > static_cast<scalar_t>(0));
while (end - start > 0) {
int mid = start + (end - start) / 2;
scalar_t midVal = cumdist[mid];
if (midVal < val) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == size) {
// No probability mass or precision problems; just return the
// first non-zero element by setting start to size-1 here,
// the code below will move it to the last non-zero probability
// this actually can happen when the random number is 1
// (github pytorch issue #4858).
start = size - 1;
}
while(start >= 1 && dist[start] == 0) start--;
return start;
}
template <typename scalar_t>
__global__ void
sampleMultinomialWithReplacement(PhiloxCudaState philox_args,
int totalSamples,
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* normDistPrefixSum,
scalar_t* normDist) {
// At the moment, each warp computes one sample value in the binary
// search due to divergence. It seems possible to compute multiple
// values and limit divergence though later on.
auto seeds = at::cuda::philox::unpack(philox_args);
// global index formula for 2D grid of 1D blocks
int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
// The block determines the distribution for which we generate a point
for (int64_t curDist = blockIdx.y;
curDist < distributions;
curDist += gridDim.y) {
for (int sample = blockIdx.x*blockDim.x + threadIdx.x;
sample < totalSamples; sample += blockDim.x*gridDim.x) {
//we are losing 3 out of 4 generated numbers but it's ok
//this kernel is not very efficient anyway
auto rand = hiprand_uniform4(&state);
scalar_t r = static_cast<scalar_t>(rand.x);
// Find the bucket that a uniform sample lies in
int choice = binarySearchForMultinomial<scalar_t>(
normDistPrefixSum + curDist * categories,
normDist + curDist * categories,
categories,
r);
dest[curDist * totalSamples + sample] = choice;
}
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void sampleMultinomialOnce(
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* sampled,
scalar_t* dist,
int stride_dist, // dist->stride(0)
int stride_categories // dist->stride(1)
) {
extern __shared__ unsigned char my_smem[];
__shared__ bool found;
__shared__ unsigned foundPos;
accscalar_t *smem = reinterpret_cast<accscalar_t *>(my_smem);
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int64_t curDist = blockIdx.x;
curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
CUDA_KERNEL_ASSERT(!at::_isnan(val));
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(!(val < zero));
sum = sum + static_cast<accscalar_t>(val);
}
// threadIdx.x == 0 has the sum value from this
sum = cuda_utils::BlockReduceSum(sum, smem);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(sum > accZero);
foundPos = 0;
smem[0] = sum;
smem[1] = sampled[curDist];
}
__syncthreads();
sum = smem[0];
scalar_t sample = static_cast<scalar_t>(smem[1]);
__syncthreads();
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
accscalar_t prevHighProb = accZero;
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
accscalar_t val = accZero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its
// bucket
scalar_t curBucket =
static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb);
scalar_t prevBucket = static_cast<scalar_t>(
threadIdx.x == 0 ? prevHighProb
: smem[threadIdx.x - 1] + prevHighProb);
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
// We're done; we have the sample
// Torch indices are 1-based
atomicMax(&foundPos, cat);
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0) {
if (found) {
dest[curDist] = foundPos;
} else {
        // This should address a rare bug where we don't select a valid index. This likely occurs when,
        // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1
        // and our uniform sample is greater than this value. In this case we likely have uninitialized memory
        // in dest[curDist]. So basically we will loop through the distribution and pick the largest index
        // where the distribution is non-zero. This is obviously terribly inefficient, but due to the
        // rarity with which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
}
void multinomial_with_replacement_kernel_impl(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());
int inputSize = self.dim();
int64_t numDist =
inputSize == 1 ? 1 : self.size(0);
int numCategories =
inputSize == 1 ? self.size(0) : self.size(1);
// Restructure data for 2d
auto self_v = inputSize == 1 ? self.view({numDist, numCategories}) : self;
result.resize_({numDist, n_sample});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self_v.scalar_type(), "multinomial_kernel_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredWarps = at::ceil_div(numCategories, C10_WARP_SIZE);
int requiredThreads = ::min(maxThreads, requiredWarps * C10_WARP_SIZE);
int requiredShared = requiredThreads * sizeof(accscalar_t);
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
Tensor sampled = native::empty_cuda({numDist, n_sample}, optTypeMetaToScalarType(self_v.options().dtype_opt()),
self_v.options().layout_opt(), self_v.options().device_opt(),
self_v.options().pinned_memory_opt());
at::native::uniform_(sampled, 0.0, 1.0, generator);
dim3 block(requiredThreads);
dim3 grid(::min(static_cast<int>(numDist), numSM * 4));
hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accscalar_t>)
, dim3(grid), dim3(block),
requiredShared,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<int64_t>(),
numDist,
numCategories,
sampled.data_ptr<scalar_t>(),
self_v.data_ptr<scalar_t>(),
self_v.stride(0),
self_v.stride(1)
);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
Tensor origDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
origDist.copy_(self_v);
Tensor normDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor prefixSum = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// Renorm along rows
normDist.copy_(origDist);
renormRows(normDist);
// Prefix sum along rows
at::cuda::cumsum_out(prefixSum, normDist, 1);
PhiloxCudaState rng_engine_inputs;
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(128);
// Each block will generate a sample from one
// distribution concurrently.
int grid_y=std::min<int>(numDist, at::cuda::getCurrentDeviceProperties()->maxGridSize[1]);
dim3 grid((n_sample-1)/block.x+1, grid_y);
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// each thread generates a single sample for (numdist/numblocks.y) distributions, however, since we have to use
// hiprand_uniform4 (See Note [Register spilling in hiprand call for CUDA < 10]),
// offset is 4 times that.
auto offset = ((numDist-1)/grid.y+1)*4;
rng_engine_inputs = gen->philox_cuda_state(offset);
}
// Sample with replacement
hipLaunchKernelGGL(( sampleMultinomialWithReplacement)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
rng_engine_inputs,
n_sample,
result.data_ptr<int64_t>(),
numDist, numCategories,
prefixSum.data_ptr<scalar_t>(),
normDist.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
if (inputSize == 1) {
result.resize_({n_sample});
}
}
}
REGISTER_DISPATCH(
multinomial_with_replacement_stub,
&multinomial_with_replacement_kernel_impl);
}}
| e32a883c102d6aede3aaa8ff3e19b3980ae0be17.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/LaunchUtils.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
namespace at { namespace native {
namespace {
template <typename T>
inline __device__ bool _isinf(T x) { return ::isinf(x); }
inline __device__ bool _isinf(c10::Half x) {
return ::isinf(static_cast<float>(x));
}
inline __device__ bool _isinf(c10::BFloat16 x) {
return ::isinf(static_cast<float>(x));
}
#define MAX_NUM_BLOCKS 200
// Normalizes the L1 norm of every row to 1; used by multinomial
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void renormRowsL1(scalar_t* dist, long rows, long cols) {
extern __shared__ unsigned char my_smem[];
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
scalar_t zero = static_cast<scalar_t>(0);
scalar_t val;
for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) {
scalar_t sum = static_cast<scalar_t>(0);
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
val = dist[row * cols + col];
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
sum = sum + val;
}
sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
smem[0] = sum;
}
__syncthreads();
sum = smem[0];
if (sum > zero) {
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
dist[row * cols + col] = dist[row * cols + col] / sum;
}
}
}
}
void renormRows(Tensor& t) {
TORCH_CHECK(t.dim() == 2);
int64_t rows = t.size(0);
int64_t cols = t.size(1);
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
const int64_t maxThreads = std::min(
props->maxThreadsPerBlock, cuda_utils::kCUDABlockReduceMaxThreads);
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(std::min(maxThreads, C10_WARP_SIZE * ceil_div(cols, int64_t{C10_WARP_SIZE})));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "renormRows_cuda", [&] {
renormRowsL1<scalar_t>
<<<grid, block, (block.x / C10_WARP_SIZE) * sizeof(scalar_t),
at::cuda::getCurrentCUDAStream()>>>(t.data_ptr<scalar_t>(),
rows, cols);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
template <typename scalar_t>
__device__ int binarySearchForMultinomial(scalar_t* cumdist,
scalar_t* dist,
int size,
scalar_t val) {
int start = 0;
int end = size;
// cumdist[size - 1] = 0 => all zero prob dist
CUDA_KERNEL_ASSERT(cumdist[size - 1] > static_cast<scalar_t>(0));
while (end - start > 0) {
int mid = start + (end - start) / 2;
scalar_t midVal = cumdist[mid];
if (midVal < val) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == size) {
// No probability mass or precision problems; just return the
// first non-zero element by setting start to size-1 here,
// the code below will move it to the last non-zero probability
// this actually can happen when the random number is 1
// (github pytorch issue #4858).
start = size - 1;
}
while(start >= 1 && dist[start] == 0) start--;
return start;
}
template <typename scalar_t>
__global__ void
sampleMultinomialWithReplacement(PhiloxCudaState philox_args,
int totalSamples,
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* normDistPrefixSum,
scalar_t* normDist) {
// At the moment, each warp computes one sample value in the binary
// search due to divergence. It seems possible to compute multiple
// values and limit divergence though later on.
auto seeds = at::cuda::philox::unpack(philox_args);
// global index formula for 2D grid of 1D blocks
int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
// The block determines the distribution for which we generate a point
for (int64_t curDist = blockIdx.y;
curDist < distributions;
curDist += gridDim.y) {
for (int sample = blockIdx.x*blockDim.x + threadIdx.x;
sample < totalSamples; sample += blockDim.x*gridDim.x) {
//we are losing 3 out of 4 generated numbers but it's ok
//this kernel is not very efficient anyway
auto rand = curand_uniform4(&state);
scalar_t r = static_cast<scalar_t>(rand.x);
// Find the bucket that a uniform sample lies in
int choice = binarySearchForMultinomial<scalar_t>(
normDistPrefixSum + curDist * categories,
normDist + curDist * categories,
categories,
r);
dest[curDist * totalSamples + sample] = choice;
}
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void sampleMultinomialOnce(
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* sampled,
scalar_t* dist,
int stride_dist, // dist->stride(0)
int stride_categories // dist->stride(1)
) {
extern __shared__ unsigned char my_smem[];
__shared__ bool found;
__shared__ unsigned foundPos;
accscalar_t *smem = reinterpret_cast<accscalar_t *>(my_smem);
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int64_t curDist = blockIdx.x;
curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
CUDA_KERNEL_ASSERT(!at::_isnan(val));
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(!(val < zero));
sum = sum + static_cast<accscalar_t>(val);
}
// threadIdx.x == 0 has the sum value from this
sum = cuda_utils::BlockReduceSum(sum, smem);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(sum > accZero);
foundPos = 0;
smem[0] = sum;
smem[1] = sampled[curDist];
}
__syncthreads();
sum = smem[0];
scalar_t sample = static_cast<scalar_t>(smem[1]);
__syncthreads();
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
accscalar_t prevHighProb = accZero;
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
accscalar_t val = accZero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its
// bucket
scalar_t curBucket =
static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb);
scalar_t prevBucket = static_cast<scalar_t>(
threadIdx.x == 0 ? prevHighProb
: smem[threadIdx.x - 1] + prevHighProb);
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
// We're done; we have the sample
// Torch indices are 1-based
atomicMax(&foundPos, cat);
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0) {
if (found) {
dest[curDist] = foundPos;
} else {
        // This should address a rare bug where we don't select a valid index. This likely occurs when,
        // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1
        // and our uniform sample is greater than this value. In this case we likely have uninitialized memory
        // in dest[curDist]. So basically we will loop through the distribution and pick the largest index
        // where the distribution is non-zero. This is obviously terribly inefficient, but due to the
        // rarity with which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
}
void multinomial_with_replacement_kernel_impl(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());
int inputSize = self.dim();
int64_t numDist =
inputSize == 1 ? 1 : self.size(0);
int numCategories =
inputSize == 1 ? self.size(0) : self.size(1);
// Restructure data for 2d
auto self_v = inputSize == 1 ? self.view({numDist, numCategories}) : self;
result.resize_({numDist, n_sample});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self_v.scalar_type(), "multinomial_kernel_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredWarps = at::ceil_div(numCategories, C10_WARP_SIZE);
int requiredThreads = std::min(maxThreads, requiredWarps * C10_WARP_SIZE);
int requiredShared = requiredThreads * sizeof(accscalar_t);
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
Tensor sampled = native::empty_cuda({numDist, n_sample}, optTypeMetaToScalarType(self_v.options().dtype_opt()),
self_v.options().layout_opt(), self_v.options().device_opt(),
self_v.options().pinned_memory_opt());
at::native::uniform_(sampled, 0.0, 1.0, generator);
dim3 block(requiredThreads);
dim3 grid(std::min(static_cast<int>(numDist), numSM * 4));
sampleMultinomialOnce<scalar_t, accscalar_t>
<<<grid, block,
requiredShared,
at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<int64_t>(),
numDist,
numCategories,
sampled.data_ptr<scalar_t>(),
self_v.data_ptr<scalar_t>(),
self_v.stride(0),
self_v.stride(1)
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
Tensor origDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
origDist.copy_(self_v);
Tensor normDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor prefixSum = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// Renorm along rows
normDist.copy_(origDist);
renormRows(normDist);
// Prefix sum along rows
at::cuda::cumsum_out(prefixSum, normDist, 1);
PhiloxCudaState rng_engine_inputs;
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(128);
// Each block will generate a sample from one
// distribution concurrently.
int grid_y=std::min<int>(numDist, at::cuda::getCurrentDeviceProperties()->maxGridSize[1]);
dim3 grid((n_sample-1)/block.x+1, grid_y);
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// each thread generates a single sample for (numdist/numblocks.y) distributions, however, since we have to use
// curand_uniform4 (See Note [Register spilling in curand call for CUDA < 10]),
// offset is 4 times that.
auto offset = ((numDist-1)/grid.y+1)*4;
rng_engine_inputs = gen->philox_cuda_state(offset);
}
// Sample with replacement
sampleMultinomialWithReplacement
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
rng_engine_inputs,
n_sample,
result.data_ptr<int64_t>(),
numDist, numCategories,
prefixSum.data_ptr<scalar_t>(),
normDist.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
if (inputSize == 1) {
result.resize_({n_sample});
}
}
}
REGISTER_DISPATCH(
multinomial_with_replacement_stub,
&multinomial_with_replacement_kernel_impl);
}}
|
f5e49df7bf6f6f2354294cfb04887c4221bd8040.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
void VVMult_CPU(double* sum_ptr, const double* a, const double* b, long N){
double sum = 0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long i = 0; i < N; i++) sum += a[i]*b[i];
*sum_ptr = sum;
}
void MVMult_CPU(double* C, const double* A, const double* B, long N) {
for (long i = 0; i < N; i++) {
double sum = 0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long j = 0; j < N; j++) {
sum += A[i*N+j]*B[j];
}
C[i] = sum;
}
}
#define BLOCK_SIZE 1024
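// Block-wide sum reduction: each block folds up to BLOCK_SIZE elements of a into
// sum[blockIdx.x] via a shared-memory tree, using __syncwarp() for the final warp.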
__global__ void reduction(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
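// Same reduction pattern, but each thread first loads the elementwise product
// A[idx]*b[idx], so each block emits one partial dot product into sum[blockIdx.x].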
__global__ void product(double* sum, const double* A, const double* b, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = A[idx]*b[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
int main() {
long N;
int exp;
std::cout << "N = 2^ " << std::endl;
std::cin >> exp;
N = (1UL<<exp);
double *x, *A;
hipHostMalloc((void**)&x, N * sizeof(double));
hipHostMalloc((void**)&A, N*N*sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++) {
x[i] = drand48();
}
for (long i = 0; i < N*N; i++) {
A[i] = drand48();
}
double *sum_ref, *sum;
hipHostMalloc((void**)&sum_ref, N * sizeof(double));
hipHostMalloc((void**)&sum, N * sizeof(double));
/* for (long i = 0; i < N; i++) {
sum_ref[i] = 0.0;
sum[i] = 0.0;
}*/
double tt = omp_get_wtime();
MVMult_CPU(sum_ref, A, x, N);
printf("CPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
double *x_d, *A_d, *z_d;
hipMalloc(&x_d, N*sizeof(double));
hipMalloc(&A_d, N*N*sizeof(double));
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
hipMalloc(&z_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
hipMemcpyAsync(x_d, x, N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(A_d, A, N*N*sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
tt = omp_get_wtime();
for (long i = 0; i < N; i++) {
double* sum_d = z_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
hipLaunchKernelGGL(( product), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d, A_d+i*N, x_d, N);
while (Nb > 1) {
long Nx = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
hipLaunchKernelGGL(( reduction), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d + Nx, sum_d, Nx);
sum_d += Nx;
}
hipMemcpyAsync(&sum[i], sum_d, 1*sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
}
printf("GPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
double error = 0;
#pragma omp parallel for reduction(+:error)
for (long i = 0; i < N; i++) {
error = error + fabs(sum[i] - sum_ref[i]);
}
printf("Error = %f\n", error);
hipFree(x_d);
hipFree(z_d);
hipFree(A_d);
hipHostFree(x);
hipHostFree(A);
return 0;
}
| f5e49df7bf6f6f2354294cfb04887c4221bd8040.cu | #include <iostream>
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
void VVMult_CPU(double* sum_ptr, const double* a, const double* b, long N){
double sum = 0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long i = 0; i < N; i++) sum += a[i]*b[i];
*sum_ptr = sum;
}
void MVMult_CPU(double* C, const double* A, const double* B, long N) {
for (long i = 0; i < N; i++) {
double sum = 0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long j = 0; j < N; j++) {
sum += A[i*N+j]*B[j];
}
C[i] = sum;
}
}
#define BLOCK_SIZE 1024
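// Block-wide sum reduction: each block folds up to BLOCK_SIZE elements of a into
// sum[blockIdx.x] via a shared-memory tree, using __syncwarp() for the final warp.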
__global__ void reduction(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
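// Same reduction pattern, but each thread first loads the elementwise product
// A[idx]*b[idx], so each block emits one partial dot product into sum[blockIdx.x].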
__global__ void product(double* sum, const double* A, const double* b, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = A[idx]*b[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
int main() {
long N;
int exp;
std::cout << "N = 2^ " << std::endl;
std::cin >> exp;
N = (1UL<<exp);
double *x, *A;
cudaMallocHost((void**)&x, N * sizeof(double));
cudaMallocHost((void**)&A, N*N*sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++) {
x[i] = drand48();
}
for (long i = 0; i < N*N; i++) {
A[i] = drand48();
}
double *sum_ref, *sum;
cudaMallocHost((void**)&sum_ref, N * sizeof(double));
cudaMallocHost((void**)&sum, N * sizeof(double));
/* for (long i = 0; i < N; i++) {
sum_ref[i] = 0.0;
sum[i] = 0.0;
}*/
double tt = omp_get_wtime();
MVMult_CPU(sum_ref, A, x, N);
printf("CPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
double *x_d, *A_d, *z_d;
cudaMalloc(&x_d, N*sizeof(double));
cudaMalloc(&A_d, N*N*sizeof(double));
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
cudaMalloc(&z_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
cudaMemcpyAsync(x_d, x, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(A_d, A, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
tt = omp_get_wtime();
for (long i = 0; i < N; i++) {
double* sum_d = z_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
product<<<Nb,BLOCK_SIZE>>>(sum_d, A_d+i*N, x_d, N);
while (Nb > 1) {
long Nx = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
reduction<<<Nb,BLOCK_SIZE>>>(sum_d + Nx, sum_d, Nx);
sum_d += Nx;
}
cudaMemcpyAsync(&sum[i], sum_d, 1*sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
}
printf("GPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
double error = 0;
#pragma omp parallel for reduction(+:error)
for (long i = 0; i < N; i++) {
error = error + fabs(sum[i] - sum_ref[i]);
}
printf("Error = %f\n", error);
cudaFree(x_d);
cudaFree(z_d);
cudaFree(A_d);
cudaFreeHost(x);
cudaFreeHost(A);
return 0;
}
|
d1c9286ad003e659382f64599e6f6a14d64a5865.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaHelper.h"
#include <iostream>
__global__ void fill_buffer(uchar4 * d_dst, int w, int h) {
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x) + threadIdx.x;
if (threadId < w*h) {
d_dst[threadId].x = 255; //magRed[i*w + j];
d_dst[threadId].y = 255;
d_dst[threadId].z = 255; //magBlue[i*w + j];
d_dst[threadId].w = 255;
}
}
void CUDAHelper::GeneratePBO(uchar4* dptr, int h, int w) {
const int BLOCKDIM_X = 32;
const int BLOCKDIM_Y = 32;
dim3 grid(iDivUp(w, BLOCKDIM_X), iDivUp(h, BLOCKDIM_Y));
dim3 block(BLOCKDIM_X, BLOCKDIM_Y);
// std::cout << "before launching kernel" << std::endl;
hipLaunchKernelGGL(( fill_buffer), dim3(grid), dim3(block) , 0, 0, dptr, h, w);
} | d1c9286ad003e659382f64599e6f6a14d64a5865.cu | #include "CudaHelper.h"
#include <iostream>
__global__ void fill_buffer(uchar4 * d_dst, int w, int h) {
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x) + threadIdx.x;
if (threadId < w*h) {
d_dst[threadId].x = 255; //magRed[i*w + j];
d_dst[threadId].y = 255;
d_dst[threadId].z = 255; //magBlue[i*w + j];
d_dst[threadId].w = 255;
}
}
void CUDAHelper::GeneratePBO(uchar4* dptr, int h, int w) {
const int BLOCKDIM_X = 32;
const int BLOCKDIM_Y = 32;
dim3 grid(iDivUp(w, BLOCKDIM_X), iDivUp(h, BLOCKDIM_Y));
dim3 block(BLOCKDIM_X, BLOCKDIM_Y);
// std::cout << "before launching kernel" << std::endl;
fill_buffer<<< grid, block >>> (dptr, h, w);
} |
1517f4efb2e812f0395c72c2b9bb0f1736051c08.hip | // !!! This is a file automatically generated by hipify!!!
//TEST CASE PASS IN GPU_VERIFY. IT IS NOT VERIFY ARRAY BOUNDS VIOLATION
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <assert.h>
#define N 2//64
__global__ void foo(int* p) {
int* q;
q = p + 1;
p[threadIdx.x] = q[threadIdx.x];
}
int main() {
int *c;
int *d;
int *dev_c;
c = (int*)malloc(N*sizeof(int));
d = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i)
c[i] = 5;
hipMalloc((void**)&dev_c, N*sizeof(int));
hipMemcpy(dev_c, c, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1), dim3(N), 0, 0, dev_c);
//ESBMC_verify_kernel(foo,1,N,dev_c);
hipMemcpy(d, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < N; ++i){
assert(d[i]==c[i+1]);
}
free(c);
hipFree(dev_c);
return 0;
}
| 1517f4efb2e812f0395c72c2b9bb0f1736051c08.cu | //TEST CASE PASS IN GPU_VERIFY. IT IS NOT VERIFY ARRAY BOUNDS VIOLATION
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <assert.h>
#define N 2//64
__global__ void foo(int* p) {
int* q;
q = p + 1;
p[threadIdx.x] = q[threadIdx.x];
}
int main() {
int *c;
int *d;
int *dev_c;
c = (int*)malloc(N*sizeof(int));
d = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i)
c[i] = 5;
cudaMalloc((void**)&dev_c, N*sizeof(int));
cudaMemcpy(dev_c, c, N*sizeof(int), cudaMemcpyHostToDevice);
foo<<<1, N>>>(dev_c);
//ESBMC_verify_kernel(foo,1,N,dev_c);
cudaMemcpy(d, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i){
assert(d[i]==c[i+1]);
}
free(c);
cudaFree(dev_c);
return 0;
}
|
963424ef15d6e75b5f110f807f8a8acd5def2184.hip | // !!! This is a file automatically generated by hipify!!!
#include "ctranslate2/primitives/primitives.h"
#include <cmath>
#include <type_traits>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <thrust/iterator/discard_iterator.h>
#include <hipcub/hipcub.hpp>
#include "cuda/helpers.h"
#include "type_dispatch.h"
namespace ctranslate2 {
static hipcub::CachingDeviceAllocator& get_allocator() {
static const auto allocator_config = cuda::get_caching_allocator_config();
static thread_local hipcub::CachingDeviceAllocator allocator(allocator_config.bin_growth,
allocator_config.min_bin,
allocator_config.max_bin,
allocator_config.max_cached_bytes);
return allocator;
}
template<>
void primitives<Device::CUDA>::set_device(int index) {
CUDA_CHECK(hipSetDevice(index));
}
template<>
int primitives<Device::CUDA>::get_device() {
int index;
CUDA_CHECK(hipGetDevice(&index));
return index;
}
template<>
void* primitives<Device::CUDA>::alloc_data(dim_t size, int device_index) {
if (device_index < 0)
device_index = get_device();
void* data = nullptr;
CUDA_CHECK(get_allocator().DeviceAllocate(device_index, &data, size, cuda::get_cuda_stream()));
return data;
}
template<>
void primitives<Device::CUDA>::free_data(void* data, int device_index) {
CUDA_CHECK(get_allocator().DeviceFree(device_index, data));
}
template<>
void primitives<Device::CUDA>::clear_cache() {
CUDA_CHECK(get_allocator().FreeAllCached());
}
template<>
template <typename T>
T primitives<Device::CUDA>::deref(const T* x, dim_t index) {
T val = T();
cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1);
return val;
}
template<>
template <typename T>
void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) {
THRUST_CALL(thrust::fill, x, x + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) {
auto it = thrust::make_permutation_iterator(
x, thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0),
thrust::placeholders::_1 * inc_x));
THRUST_CALL(thrust::fill, it, it + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T),
hipMemcpyDeviceToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename U, typename V>
void primitives<Device::CUDA>::convert(const U* x, V* y, dim_t size) {
THRUST_CALL(thrust::copy,
cuda::device_cast(x), cuda::device_cast(x) + size, cuda::device_cast(y));
}
template void primitives<Device::CUDA>::convert(const float*, float16_t*, dim_t);
template void primitives<Device::CUDA>::convert(const float16_t*, float*, dim_t);
template<>
template <typename T>
T primitives<Device::CUDA>::sum(const T* array, dim_t size) {
return T(THRUST_CALL(thrust::reduce,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::device_type<T>(),
cuda::plus<cuda::device_type<T>>()));
}
template<>
template <typename T>
dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::maximum<cuda::device_type<T>>());
return static_cast<dim_t>(max - cuda::device_cast(array));
}
template<>
template <typename T>
T primitives<Device::CUDA>::max(const T* array, dim_t size) {
return deref(array, max_element(array, size));
}
#if !CUDA_CAN_USE_HALF
namespace cuda {
template<>
struct maximum<thrust::tuple<__half, int32_t>> {
__host__ __device__ thrust::tuple<__half, int32_t>
operator()(const thrust::tuple<__half, int32_t>& lhs,
const thrust::tuple<__half, int32_t>& rhs) const {
const float lv = float(lhs.get<0>());
const float rv = float(rhs.get<0>());
if (rv > lv)
return rhs;
if (lv < rv)
return lhs;
return lhs.get<1>() < rhs.get<1>() ? rhs : lhs;
}
};
}
#endif
template<>
template <typename T>
void primitives<Device::CUDA>::row_max(const T* x,
const dim_t rows,
const dim_t cols,
T* values,
int32_t* indices) {
auto keys_it = thrust::make_transform_iterator(thrust::counting_iterator<int32_t>(0),
cuda::repeat_vec_depth<int32_t>(cols));
auto ids_it = thrust::make_transform_iterator(thrust::counting_iterator<int32_t>(0),
cuda::repeat_vec<int32_t>(cols));
THRUST_CALL(thrust::reduce_by_key,
keys_it, keys_it + (rows * cols),
thrust::make_zip_iterator(thrust::make_tuple(cuda::device_cast(x), ids_it)),
thrust::make_discard_iterator(),
thrust::make_zip_iterator(thrust::make_tuple(cuda::device_cast(values), indices)),
thrust::equal_to<int32_t>(),
cuda::maximum<thrust::tuple<cuda::device_type<T>, int32_t>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::plus, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::plus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec_depth<dim_t>(b_size / a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::minimum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minimum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::maximum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::maximum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::multiplies, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::multiplies<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::multiplies<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::relu(const T* x, T* y, dim_t size) {
max(T(0), x, y, size);
}
template void primitives<Device::CUDA>::relu(const float*, float*, dim_t);
template void primitives<Device::CUDA>::relu(const float16_t*, float16_t*, dim_t);
struct gelu_func {
float _scale;
gelu_func(float scale)
: _scale(scale) {
}
__host__ __device__
float operator()(float x) {
return 0.5f * x * (1.f + tanhf(_scale * (x + 0.044715f * powf(x, 3.f))));
}
};
template<>
void primitives<Device::CUDA>::gelu(const float* x, float* y, dim_t size) {
static const float pi = std::acos(-1.f);
static const float scale = std::sqrt(2.f / pi);
cuda::unary_transform(x, y, size, gelu_func(scale));
}
template <typename T>
struct perm_indices_2d {
T _rows, _cols;
perm_indices_2d(T rows, T cols)
: _rows(rows)
, _cols(cols) {
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _rows;
const T i1 = i % _rows;
return i1 * _cols + i0;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) {
cuda::permute(a, b, dims[0] * dims[1], perm_indices_2d<dim_t>(dims[0], dims[1]));
}
template <typename T>
struct perm_indices_3d {
T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array.
    T _b_d0, _b_d1, _b_d2;    // Dimension of the permuted array.
    T _b_s0, _b_s1, _b_s2;    // Strides of the permuted array.
perm_indices_3d(const T* dims, const T* perm) {
const T a_stride[3] = {dims[1] * dims[2], dims[2], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_s0 = _b_d1 * _b_d2;
_b_s1 = _b_d2;
_b_s2 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i % _b_d2;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_3d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
cuda::permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<dim_t>(dims, perm));
}
template <typename T>
struct perm_indices_4d {
T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array.
    T _b_d0, _b_d1, _b_d2, _b_d3; // Dimension of the permuted array.
    T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permuted array.
perm_indices_4d(const T* dims, const T* perm) {
const T a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_a_ps3 = a_stride[perm[3]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_d3 = dims[perm[3]];
_b_s0 = _b_d1 * _b_d2 * _b_d3;
_b_s1 = _b_d2 * _b_d3;
_b_s2 = _b_d3;
_b_s3 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i / _b_s2 % _b_d2;
const T i3 = i % _b_d3;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3;
}
};
template <typename T>
__global__ void transpose_0213(const T* in,
const dim_t rows,
const dim_t cols,
const dim_t stride1,
const dim_t stride2,
T* out) {
const dim_t stride = stride1 * stride2;
for (dim_t j = blockIdx.x; j < rows; j += gridDim.x) {
const dim_t z = j / stride;
const dim_t y = (j % stride) / stride1;
const dim_t x = (j % stride) % stride1;
const dim_t j2 = z * stride + x * stride2 + y;
const T* row_in = in + j2 * cols;
T* row_out = out + j * cols;
for (dim_t i = threadIdx.x; i < cols; i += blockDim.x) {
row_out[i] = row_in[i];
}
}
}
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_4d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1 && perm[3] == 3) {
// Optimize the permutation used in multi-head attention.
const dim_t rows = dims[0] * dims[1] * dims[2];
const dim_t cols = dims[3];
const dim_t blocks = ::min(rows, cuda::max_blocks);
const dim_t threads = ::min(cols, cuda::max_threads);
hipLaunchKernelGGL(( transpose_0213), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(), a,
rows,
cols,
dims[1],
dims[2],
b);
return;
}
cuda::permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<dim_t>(dims, perm));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float* a, const float* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c,
const float*) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemm(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb,
a, lda,
&beta,
c, ldc));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float16_t* a, const float16_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c,
const float16_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(hipblasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, HIP_R_16F, ldb,
a, HIP_R_16F, lda,
&beta_h,
c, HIP_R_16F, ldc,
HIP_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const int8_t* a, const int8_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
int32_t* c,
const int32_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int32_t alpha_i = alpha;
int32_t beta_i = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(hipblasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_i,
b, HIP_R_8I, ldb,
a, HIP_R_8I, lda,
&beta_i,
c, HIP_R_32I, ldc,
HIP_R_32I,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemmStridedBatched(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb, strideb,
a, lda, stridea,
&beta,
c, ldc, stridec,
batch_size));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float16_t* a, const float16_t* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(hipblasGemmStridedBatchedEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, HIP_R_16F, ldb, strideb,
a, HIP_R_16F, lda, stridea,
&beta_h,
c, HIP_R_16F, ldc, stridec,
batch_size,
HIP_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
struct exp_func {
__host__ __device__
float operator()(float x) { return expf(x); }
};
template<>
void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, exp_func());
}
struct log_func {
__host__ __device__
float operator()(float x) { return logf(x); }
};
template<>
void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, log_func());
}
template<>
template <typename T>
void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyHostToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyDeviceToHost, cuda::get_cuda_stream()));
}
#define DECLARE_IMPL(T) \
template T \
primitives<Device::CUDA>::deref(const T* x, dim_t index); \
template void \
primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \
template void \
primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \
template void \
primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \
template T \
primitives<Device::CUDA>::sum(const T* array, dim_t size); \
template dim_t \
primitives<Device::CUDA>::max_element(const T* array, dim_t size); \
template T \
primitives<Device::CUDA>::max(const T* array, dim_t size); \
template void \
primitives<Device::CUDA>::row_max(const T* x, \
const dim_t rows, \
const dim_t cols, \
T* values, \
int32_t* indices); \
template void \
primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::transpose_2d(const T* a, \
const dim_t* dims, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_3d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_4d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \
template void \
cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t);
DECLARE_ALL_TYPES(DECLARE_IMPL)
}
| 963424ef15d6e75b5f110f807f8a8acd5def2184.cu | #include "ctranslate2/primitives/primitives.h"
#include <cmath>
#include <type_traits>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <thrust/iterator/discard_iterator.h>
#include <cub/util_allocator.cuh>
#include "cuda/helpers.h"
#include "type_dispatch.h"
namespace ctranslate2 {
static cub::CachingDeviceAllocator& get_allocator() {
static const auto allocator_config = cuda::get_caching_allocator_config();
static thread_local cub::CachingDeviceAllocator allocator(allocator_config.bin_growth,
allocator_config.min_bin,
allocator_config.max_bin,
allocator_config.max_cached_bytes);
return allocator;
}
template<>
void primitives<Device::CUDA>::set_device(int index) {
CUDA_CHECK(cudaSetDevice(index));
}
template<>
int primitives<Device::CUDA>::get_device() {
int index;
CUDA_CHECK(cudaGetDevice(&index));
return index;
}
template<>
void* primitives<Device::CUDA>::alloc_data(dim_t size, int device_index) {
if (device_index < 0)
device_index = get_device();
void* data = nullptr;
CUDA_CHECK(get_allocator().DeviceAllocate(device_index, &data, size, cuda::get_cuda_stream()));
return data;
}
template<>
void primitives<Device::CUDA>::free_data(void* data, int device_index) {
CUDA_CHECK(get_allocator().DeviceFree(device_index, data));
}
template<>
void primitives<Device::CUDA>::clear_cache() {
CUDA_CHECK(get_allocator().FreeAllCached());
}
template<>
template <typename T>
T primitives<Device::CUDA>::deref(const T* x, dim_t index) {
T val = T();
cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1);
return val;
}
template<>
template <typename T>
void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) {
THRUST_CALL(thrust::fill, x, x + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) {
auto it = thrust::make_permutation_iterator(
x, thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0),
thrust::placeholders::_1 * inc_x));
THRUST_CALL(thrust::fill, it, it + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T),
cudaMemcpyDeviceToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename U, typename V>
void primitives<Device::CUDA>::convert(const U* x, V* y, dim_t size) {
THRUST_CALL(thrust::copy,
cuda::device_cast(x), cuda::device_cast(x) + size, cuda::device_cast(y));
}
template void primitives<Device::CUDA>::convert(const float*, float16_t*, dim_t);
template void primitives<Device::CUDA>::convert(const float16_t*, float*, dim_t);
template<>
template <typename T>
T primitives<Device::CUDA>::sum(const T* array, dim_t size) {
return T(THRUST_CALL(thrust::reduce,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::device_type<T>(),
cuda::plus<cuda::device_type<T>>()));
}
template<>
template <typename T>
dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::maximum<cuda::device_type<T>>());
return static_cast<dim_t>(max - cuda::device_cast(array));
}
template<>
template <typename T>
T primitives<Device::CUDA>::max(const T* array, dim_t size) {
return deref(array, max_element(array, size));
}
#if !CUDA_CAN_USE_HALF
namespace cuda {
template<>
struct maximum<thrust::tuple<__half, int32_t>> {
__host__ __device__ thrust::tuple<__half, int32_t>
operator()(const thrust::tuple<__half, int32_t>& lhs,
const thrust::tuple<__half, int32_t>& rhs) const {
const float lv = float(lhs.get<0>());
const float rv = float(rhs.get<0>());
if (rv > lv)
return rhs;
if (lv > rv)
return lhs;
return lhs.get<1>() < rhs.get<1>() ? rhs : lhs;
}
};
}
#endif
template<>
template <typename T>
void primitives<Device::CUDA>::row_max(const T* x,
const dim_t rows,
const dim_t cols,
T* values,
int32_t* indices) {
auto keys_it = thrust::make_transform_iterator(thrust::counting_iterator<int32_t>(0),
cuda::repeat_vec_depth<int32_t>(cols));
auto ids_it = thrust::make_transform_iterator(thrust::counting_iterator<int32_t>(0),
cuda::repeat_vec<int32_t>(cols));
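// Added note (an interpretation based on how these helpers are used by the broadcast
// primitives below): keys_it maps a flat index i to its row i / cols and ids_it maps
// it to its column i % cols, so reduce_by_key produces, for every row, the maximum
// value together with the column where it occurs (a fused row-wise max/argmax).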
THRUST_CALL(thrust::reduce_by_key,
keys_it, keys_it + (rows * cols),
thrust::make_zip_iterator(thrust::make_tuple(cuda::device_cast(x), ids_it)),
thrust::make_discard_iterator(),
thrust::make_zip_iterator(thrust::make_tuple(cuda::device_cast(values), indices)),
thrust::equal_to<int32_t>(),
cuda::maximum<thrust::tuple<cuda::device_type<T>, int32_t>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::plus, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::plus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec_depth<dim_t>(b_size / a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::minimum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minimum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::maximum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::maximum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::multiplies, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::multiplies<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::multiplies<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::relu(const T* x, T* y, dim_t size) {
max(T(0), x, y, size);
}
template void primitives<Device::CUDA>::relu(const float*, float*, dim_t);
template void primitives<Device::CUDA>::relu(const float16_t*, float16_t*, dim_t);
struct gelu_func {
float _scale;
gelu_func(float scale)
: _scale(scale) {
}
__host__ __device__
float operator()(float x) {
return 0.5f * x * (1.f + tanhf(_scale * (x + 0.044715f * powf(x, 3.f))));
}
};
template<>
void primitives<Device::CUDA>::gelu(const float* x, float* y, dim_t size) {
static const float pi = std::acos(-1.f);
static const float scale = std::sqrt(2.f / pi);
cuda::unary_transform(x, y, size, gelu_func(scale));
}
template <typename T>
struct perm_indices_2d {
T _rows, _cols;
perm_indices_2d(T rows, T cols)
: _rows(rows)
, _cols(cols) {
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _rows;
const T i1 = i % _rows;
return i1 * _cols + i0;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) {
cuda::permute(a, b, dims[0] * dims[1], perm_indices_2d<dim_t>(dims[0], dims[1]));
}
template <typename T>
struct perm_indices_3d {
T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2; // Strides of the permuted array.
perm_indices_3d(const T* dims, const T* perm) {
const T a_stride[3] = {dims[1] * dims[2], dims[2], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_s0 = _b_d1 * _b_d2;
_b_s1 = _b_d2;
_b_s2 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i % _b_d2;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_3d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
cuda::permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<dim_t>(dims, perm));
}
template <typename T>
struct perm_indices_4d {
T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2, _b_d3; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permuted array.
perm_indices_4d(const T* dims, const T* perm) {
const T a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_a_ps3 = a_stride[perm[3]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_d3 = dims[perm[3]];
_b_s0 = _b_d1 * _b_d2 * _b_d3;
_b_s1 = _b_d2 * _b_d3;
_b_s2 = _b_d3;
_b_s3 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i / _b_s2 % _b_d2;
const T i3 = i % _b_d3;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3;
}
};
template <typename T>
__global__ void transpose_0213(const T* in,
const dim_t rows,
const dim_t cols,
const dim_t stride1,
const dim_t stride2,
T* out) {
const dim_t stride = stride1 * stride2;
for (dim_t j = blockIdx.x; j < rows; j += gridDim.x) {
const dim_t z = j / stride;
const dim_t y = (j % stride) / stride1;
const dim_t x = (j % stride) % stride1;
const dim_t j2 = z * stride + x * stride2 + y;
const T* row_in = in + j2 * cols;
T* row_out = out + j * cols;
for (dim_t i = threadIdx.x; i < cols; i += blockDim.x) {
row_out[i] = row_in[i];
}
}
}
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_4d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1 && perm[3] == 3) {
// Optimize the permutation used in multi-head attention.
const dim_t rows = dims[0] * dims[1] * dims[2];
const dim_t cols = dims[3];
const dim_t blocks = std::min(rows, cuda::max_blocks);
const dim_t threads = std::min(cols, cuda::max_threads);
transpose_0213<<<blocks, threads, 0, cuda::get_cuda_stream()>>>(a,
rows,
cols,
dims[1],
dims[2],
b);
return;
}
cuda::permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<dim_t>(dims, perm));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float* a, const float* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c,
const float*) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemm(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb,
a, lda,
&beta,
c, ldc));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float16_t* a, const float16_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c,
const float16_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(cublasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, CUDA_R_16F, ldb,
a, CUDA_R_16F, lda,
&beta_h,
c, CUDA_R_16F, ldc,
CUDA_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const int8_t* a, const int8_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
int32_t* c,
const int32_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
int32_t alpha_i = alpha;
int32_t beta_i = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(cublasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_i,
b, CUDA_R_8I, ldb,
a, CUDA_R_8I, lda,
&beta_i,
c, CUDA_R_32I, ldc,
CUDA_R_32I,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemmStridedBatched(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb, strideb,
a, lda, stridea,
&beta,
c, ldc, stridec,
batch_size));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float16_t* a, const float16_t* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(cublasGemmStridedBatchedEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, CUDA_R_16F, ldb, strideb,
a, CUDA_R_16F, lda, stridea,
&beta_h,
c, CUDA_R_16F, ldc, stridec,
batch_size,
CUDA_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
struct exp_func {
__host__ __device__
float operator()(float x) { return expf(x); }
};
template<>
void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, exp_func());
}
struct log_func {
__host__ __device__
float operator()(float x) { return logf(x); }
};
template<>
void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, log_func());
}
template<>
template <typename T>
void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyHostToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyDeviceToHost, cuda::get_cuda_stream()));
}
#define DECLARE_IMPL(T) \
template T \
primitives<Device::CUDA>::deref(const T* x, dim_t index); \
template void \
primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \
template void \
primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \
template void \
primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \
template T \
primitives<Device::CUDA>::sum(const T* array, dim_t size); \
template dim_t \
primitives<Device::CUDA>::max_element(const T* array, dim_t size); \
template T \
primitives<Device::CUDA>::max(const T* array, dim_t size); \
template void \
primitives<Device::CUDA>::row_max(const T* x, \
const dim_t rows, \
const dim_t cols, \
T* values, \
int32_t* indices); \
template void \
primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::transpose_2d(const T* a, \
const dim_t* dims, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_3d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_4d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \
template void \
cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t);
DECLARE_ALL_TYPES(DECLARE_IMPL)
}
|
909f2c441428342e429b7a10fac43169cfc86344.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "optixPagingImpl.cpp"
__host__ void optixPagingPullRequests( OptixPagingContext* context,
unsigned int* devRequestedPages,
unsigned int numRequestedPages,
PageMapping* devStalePages,
unsigned int numStalePages,
unsigned int* devEvictablePages,
unsigned int numEvictablePages,
unsigned int* devNumPagesReturned )
{
OPTIX_PAGING_CHECK_CUDA_ERROR( hipMemset( devRequestedPages, 0, numRequestedPages * sizeof( unsigned int ) ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( hipMemset( devStalePages, 0, numStalePages * sizeof( unsigned int ) ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( hipMemset( devEvictablePages, 0, numEvictablePages * sizeof( unsigned int ) ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( hipMemset( devNumPagesReturned, 0, 3 * sizeof( unsigned int ) ) );
int numPagesPerThread = context->maxVaSizeInPages / 65536;
numPagesPerThread = ( numPagesPerThread + 31 ) & 0xFFFFFFE0; // Round up to a multiple of 32
if( numPagesPerThread < 32 )
numPagesPerThread = 32;
const int numThreadsPerBlock = 64;
const int numPagesPerBlock = numPagesPerThread * numThreadsPerBlock;
const int numBlocks = ( context->maxVaSizeInPages + ( numPagesPerBlock - 1 ) ) / numPagesPerBlock;
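// Added worked example: for maxVaSizeInPages = 2^20 (1048576 pages) this gives
// numPagesPerThread = 16, rounded up to 32, numPagesPerBlock = 32 * 64 = 2048
// and numBlocks = 512.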
hipLaunchKernelGGL(( devicePullRequests), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, context->usageBits, context->residenceBits, context->pageTable,
context->maxVaSizeInPages, devRequestedPages,
numRequestedPages, devNumPagesReturned, devStalePages,
numStalePages, devNumPagesReturned + 1, devEvictablePages,
numEvictablePages, devNumPagesReturned + 2 );
}
__host__ void optixPagingPushMappings( OptixPagingContext* context,
PageMapping* devFilledPages,
int filledPageCount,
unsigned int* devInvalidatedPages,
int invalidatedPageCount )
{
// Zero out the reference bits
unsigned int referenceBitsSizeInBytes =
sizeof( unsigned int ) * static_cast<unsigned int>( context->residenceBits - context->usageBits );
OPTIX_PAGING_CHECK_CUDA_ERROR( hipMemset( context->usageBits, 0, referenceBitsSizeInBytes ) );
const int numPagesPerThread = 2;
const int numThreadsPerBlock = 128;
const int numPagesPerBlock = numPagesPerThread * numThreadsPerBlock;
if( filledPageCount != 0 )
{
const int numFilledPageBlocks = ( filledPageCount + numPagesPerBlock - 1 ) / numPagesPerBlock;
hipLaunchKernelGGL(( deviceFillPages), dim3(numFilledPageBlocks), dim3(numThreadsPerBlock), 0, 0, context->pageTable, context->residenceBits,
devFilledPages, filledPageCount );
}
if( invalidatedPageCount != 0 )
{
const int numInvalidatedPageBlocks = ( invalidatedPageCount + numPagesPerBlock - 1 ) / numPagesPerBlock;
hipLaunchKernelGGL(( deviceInvalidatePages), dim3(numInvalidatedPageBlocks), dim3(numThreadsPerBlock), 0, 0, context->residenceBits, devInvalidatedPages,
invalidatedPageCount );
}
}
__host__ void optixPagingCreate( OptixPagingOptions* options, OptixPagingContext** context )
{
*context = new OptixPagingContext;
( *context )->maxVaSizeInPages = options->maxVaSizeInPages;
( *context )->usageBits = nullptr;
( *context )->pageTable = nullptr;
}
__host__ void optixPagingDestroy( OptixPagingContext* context )
{
delete context;
}
__host__ void optixPagingCalculateSizes( unsigned int vaSizeInPages, OptixPagingSizes& sizes )
{
//TODO: decide on limit for sizes, add asserts
// Number of entries * 8 bytes per entry
sizes.pageTableSizeInBytes = vaSizeInPages * sizeof( unsigned long long );
// Calc reference bits size with 128 byte alignment, residence bits are same size.
// Usage bits is the concatenation of the two.
unsigned int referenceBitsSizeInBytes = ( ( vaSizeInPages + 1023 ) & 0xFFFFFC00 ) / 8;
unsigned int residenceBitsSizeInBytes = referenceBitsSizeInBytes;
sizes.usageBitsSizeInBytes = referenceBitsSizeInBytes + residenceBitsSizeInBytes;
}
__host__ void optixPagingSetup( OptixPagingContext* context, const OptixPagingSizes& sizes, int numWorkers )
{
// TODO: decide on limit for numWorkers, add asserts
// This doubles as a memset and a check to make sure they allocated the device pointers
OPTIX_PAGING_CHECK_CUDA_ERROR( hipMemset( context->pageTable, 0, sizes.pageTableSizeInBytes ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( hipMemset( context->usageBits, 0, sizes.usageBitsSizeInBytes * numWorkers ) );
// Set residence bits pointer in context (index half way into usage bits)
context->residenceBits = context->usageBits + ( sizes.usageBitsSizeInBytes / sizeof( unsigned int ) / 2 );
}
| 909f2c441428342e429b7a10fac43169cfc86344.cu | //
// Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "optixPagingImpl.cpp"
__host__ void optixPagingPullRequests( OptixPagingContext* context,
unsigned int* devRequestedPages,
unsigned int numRequestedPages,
PageMapping* devStalePages,
unsigned int numStalePages,
unsigned int* devEvictablePages,
unsigned int numEvictablePages,
unsigned int* devNumPagesReturned )
{
OPTIX_PAGING_CHECK_CUDA_ERROR( cudaMemset( devRequestedPages, 0, numRequestedPages * sizeof( unsigned int ) ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( cudaMemset( devStalePages, 0, numStalePages * sizeof( unsigned int ) ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( cudaMemset( devEvictablePages, 0, numEvictablePages * sizeof( unsigned int ) ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( cudaMemset( devNumPagesReturned, 0, 3 * sizeof( unsigned int ) ) );
int numPagesPerThread = context->maxVaSizeInPages / 65536;
numPagesPerThread = ( numPagesPerThread + 31 ) & 0xFFFFFFE0; // Round up to a multiple of 32
if( numPagesPerThread < 32 )
numPagesPerThread = 32;
const int numThreadsPerBlock = 64;
const int numPagesPerBlock = numPagesPerThread * numThreadsPerBlock;
const int numBlocks = ( context->maxVaSizeInPages + ( numPagesPerBlock - 1 ) ) / numPagesPerBlock;
devicePullRequests<<<numBlocks, numThreadsPerBlock>>>( context->usageBits, context->residenceBits, context->pageTable,
context->maxVaSizeInPages, devRequestedPages,
numRequestedPages, devNumPagesReturned, devStalePages,
numStalePages, devNumPagesReturned + 1, devEvictablePages,
numEvictablePages, devNumPagesReturned + 2 );
}
__host__ void optixPagingPushMappings( OptixPagingContext* context,
PageMapping* devFilledPages,
int filledPageCount,
unsigned int* devInvalidatedPages,
int invalidatedPageCount )
{
// Zero out the reference bits
unsigned int referenceBitsSizeInBytes =
sizeof( unsigned int ) * static_cast<unsigned int>( context->residenceBits - context->usageBits );
OPTIX_PAGING_CHECK_CUDA_ERROR( cudaMemset( context->usageBits, 0, referenceBitsSizeInBytes ) );
const int numPagesPerThread = 2;
const int numThreadsPerBlock = 128;
const int numPagesPerBlock = numPagesPerThread * numThreadsPerBlock;
if( filledPageCount != 0 )
{
const int numFilledPageBlocks = ( filledPageCount + numPagesPerBlock - 1 ) / numPagesPerBlock;
deviceFillPages<<<numFilledPageBlocks, numThreadsPerBlock>>>( context->pageTable, context->residenceBits,
devFilledPages, filledPageCount );
}
if( invalidatedPageCount != 0 )
{
const int numInvalidatedPageBlocks = ( invalidatedPageCount + numPagesPerBlock - 1 ) / numPagesPerBlock;
deviceInvalidatePages<<<numInvalidatedPageBlocks, numThreadsPerBlock>>>( context->residenceBits, devInvalidatedPages,
invalidatedPageCount );
}
}
__host__ void optixPagingCreate( OptixPagingOptions* options, OptixPagingContext** context )
{
*context = new OptixPagingContext;
( *context )->maxVaSizeInPages = options->maxVaSizeInPages;
( *context )->usageBits = nullptr;
( *context )->pageTable = nullptr;
}
__host__ void optixPagingDestroy( OptixPagingContext* context )
{
delete context;
}
__host__ void optixPagingCalculateSizes( unsigned int vaSizeInPages, OptixPagingSizes& sizes )
{
//TODO: decide on limit for sizes, add asserts
// Number of entries * 8 bytes per entry
sizes.pageTableSizeInBytes = vaSizeInPages * sizeof( unsigned long long );
// Calc reference bits size with 128 byte alignment, residence bits are same size.
// Usage bits is the concatenation of the two.
unsigned int referenceBitsSizeInBytes = ( ( vaSizeInPages + 1023 ) & 0xFFFFFC00 ) / 8;
unsigned int residenceBitsSizeInBytes = referenceBitsSizeInBytes;
sizes.usageBitsSizeInBytes = referenceBitsSizeInBytes + residenceBitsSizeInBytes;
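// Added worked example: vaSizeInPages = 1024 gives pageTableSizeInBytes = 8192,
// referenceBitsSizeInBytes = residenceBitsSizeInBytes = 128 and
// usageBitsSizeInBytes = 256.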
}
__host__ void optixPagingSetup( OptixPagingContext* context, const OptixPagingSizes& sizes, int numWorkers )
{
// TODO: decide on limit for numWorkers, add asserts
// This doubles as a memset and a check to make sure they allocated the device pointers
OPTIX_PAGING_CHECK_CUDA_ERROR( cudaMemset( context->pageTable, 0, sizes.pageTableSizeInBytes ) );
OPTIX_PAGING_CHECK_CUDA_ERROR( cudaMemset( context->usageBits, 0, sizes.usageBitsSizeInBytes * numWorkers ) );
// Set residence bits pointer in context (index half way into usage bits)
context->residenceBits = context->usageBits + ( sizes.usageBitsSizeInBytes / sizeof( unsigned int ) / 2 );
}
|
8a508e5fb5797974f1bd9e0f174b66448e34a1ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<hip/hip_runtime.h>
__global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main()
{
int N = 2<<24;
size_t size = N * sizeof(int);
int *a;
hipMallocManaged(&a, size);
int threadsPerBlock = 32;
int blocksPerGrid =((N + threadsPerBlock - 1)/ threadsPerBlock);
hostFunction(a,N);
hipLaunchKernelGGL((deviceKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a, N);
hipDeviceSynchronize();
/*
* Conduct experiments to learn more about the behavior of
* `hipMallocManaged`.
*
* What happens when unified memory is accessed only by the GPU?
* What happens when unified memory is accessed only by the CPU?
* What happens when unified memory is accessed first by the GPU then the CPU?
* What happens when unified memory is accessed first by the CPU then the GPU?
*
* Hypothesize about UM behavior, page faulting specifically, before each
* experiment, and then verify by running `nvprof`.
for (int i = 0; i < N; ++i)
{
if (a[i]!=1)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
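/*
* Added note (a sketch, not part of the original exercise): the page faults that
* nvprof reports for the launch above can be removed by prefetching the managed
* buffer to the GPU first, e.g. hipMemPrefetchAsync(a, size, 0); before the kernel
* (assuming device 0 and managed-memory prefetch support), or back to the host with
* hipMemPrefetchAsync(a, size, hipCpuDeviceId); before the CPU-side loop.
*/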
hipFree(a);
}
| 8a508e5fb5797974f1bd9e0f174b66448e34a1ce.cu |
#include<stdio.h>
#include<cuda_runtime.h>
__global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main()
{
int N = 2<<24;
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size);
int threadsPerBlock = 32;
int blocksPerGrid =((N + threadsPerBlock - 1)/ threadsPerBlock);
hostFunction(a,N);
deviceKernel<<<blocksPerGrid,threadsPerBlock>>>(a,N);
cudaDeviceSynchronize();
/*
* Conduct experiments to learn more about the behavior of
* `cudaMallocManaged`.
*
* What happens when unified memory is accessed only by the GPU?
* What happens when unified memory is accessed only by the CPU?
* What happens when unified memory is accessed first by the GPU then the CPU?
* What happens when unified memory is accessed first by the CPU then the GPU?
*
* Hypothesize about UM behavior, page faulting specifically, before each
* experiment, and then verify by running `nvprof`.
for (int i = 0; i < N; ++i)
{
if (a[i]!=1)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
cudaFree(a);
}
|
8e2f8da82d706b95c9890b822cd8b3a2d0e69439.hip | // !!! This is a file automatically generated by hipify!!!
//************************************************************************************
//Kalman filter using CUDA
//
//Check https://en.wikipedia.org/wiki/Kalman_filter for more details about Kalman
//
//Created by Junkai Cheng, 2019/01/15
//***********************************************************************************
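//Added summary for clarity: the kernels below implement the standard discrete
//Kalman recursion (see the link above):
// predict: x_k' = A*x_{k-1}, P_k' = A*P_{k-1}*A^T + Q
// update: K = P_k'*H^T * (H*P_k'*H^T + R)^{-1}
// x_k = x_k' + K*(z_k - H*x_k'), P_k = (I - K*H)*P_k'
//with A the constant-velocity transition over Time, H selecting the measured (x, y)
//position, and Q, R the process/measurement noise covariances defined below.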
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <cstring>
#include <math.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include "point.h"
#include "kalman_cuda.h"
__device__ float ele_multi(float* A, float* B, int Awidth, int Bwidth, int tx, int ty);
//some constant matrices to be used in this part
__device__ float H[8] = {
1.0, 0, 0, 0,
0, 1.0, 0, 0
};
__device__ float HT[8] = {
1.0, 0,
0, 1.0,
0, 0,
0, 0
};
__device__ float A[16] = {
1.0, 0, Time, 0,
0, 1.0, 0, Time,
0, 0, 1.0, 0,
0, 0, 0, 1.0
};
__device__ float AT[16] = {
1.0, 0, 0, 0,
0, 1.0, 0, 0,
Time, 0, 1.0, 0,
0, Time, 0, 1.0
};
__device__ float Q[16] = {
0, 0.01, 0, 0,
0.01, 0, 0, 0,
0, 0, 0.002, 0.01,
0, 0, 0.01, 0.001
};
__device__ float R[4] = {
0.01, 0.01,
0.01, 0.01
};
__device__ float I[16] = {
1.0, 0, 0, 0,
0, 1.0, 0, 0,
0, 0, 1.0, 0,
0, 0, 0, 1.0
};
__global__ void PredictKernel(float* predictD, float* covD, float* new_predictD, float* new_covD, int point_num){
//Kernel function for the first two steps of Kalman Filter
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
__shared__ float temp[CovSize];
float value;
//calculate x_k' = A * x_{k-1}
if (tx < 1){
value = ele_multi(A, predictD + bx * PredictSize, 4, 1, tx, ty);
new_predictD[bx * PredictSize + ty] = value;
}
//calculate P_k' = A * P_{k-1} * A^T + Q
value = ele_multi(A, covD + bx * CovSize, 4, 4, tx, ty);
temp[ty * 4 + tx] = value;
__syncthreads();
value = ele_multi(temp, AT, 4, 4, tx, ty);
if (bx < point_num)
new_covD[bx * CovSize + ty * 4 + tx] = value + Q[ty * 4 + tx];
__syncthreads();
}
__global__ void UpdateKernel(float* dataD, float* predictD, float* covD, float* new_predictD, float* new_covD, int point_num, int ite_num){
//kernel function for the remaining three steps of the Kalman Filter
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
float value;
__shared__ float temp[CovSize];
//calculate H*P
if (ty < 2){
value = ele_multi(H, covD + bx*CovSize, 4, 4, tx, ty);
temp[ty * 4 + tx] = value;
}
__syncthreads();
//calculate H*P_k*H^T + R
__shared__ float temp2[PredictSize];
if (ty < 2 && tx < 2){
value = ele_multi(temp, HT, 4, 2, tx, ty);
temp2[ty * 2 + tx] = value + R[ty * 2 + tx];
}
//calculate P_k* H^T
__shared__ float temp3[8];
if (tx < 2){
value = ele_multi(covD + bx*CovSize, HT, 4, 2, tx, ty);
temp3[ty * 2 + tx] = value;
}
__syncthreads();
//calculate K
__shared__ float K[8];
float det = temp2[0] * temp2[3] - temp2[2] * temp2[1];
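//Added note: temp2 holds the 2x2 innovation covariance S = H*P_k'*H^T + R; the four
//lines below apply the closed-form inverse [[a,b],[c,d]]^{-1} = 1/(ad-bc)*[[d,-b],[-c,a]].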
__shared__ float temp2_inv[4];
temp2_inv[0] = 1.0f / det * temp2[3];
temp2_inv[1] = -1.0f / det * temp2[1];
temp2_inv[2] = -1.0f / det * temp2[2];
temp2_inv[3] = 1.0f / det * temp2[0];
if (tx < 2){
value = ele_multi(temp3, temp2_inv, 2, 2, tx, ty);
K[ty * 2 + tx] = value;
}
//calculate z_k - H*x_k'
__shared__ float temp4[8];
if (tx < 1 && ty < 2){
value = ele_multi(H, predictD + bx * PredictSize, 4, 1, tx, ty);
temp4[ty] = dataD[MeasureSize * bx + ty] - value;
}
__syncthreads();
//calculate x_k
if (tx < 1){
value = ele_multi(K, temp4, 2, 1, tx, ty);
new_predictD[bx * PredictSize + ty] = predictD[bx * PredictSize + ty] + value;
}
//calculate I-K*H
__shared__ float temp5[CovSize];
value = ele_multi(K, H, 2, 4, tx, ty);
temp5[ty * 4 + tx] = I[ty * 4 + tx] - value;
__syncthreads();
//calculate P_k
value = ele_multi(temp5, covD + bx*CovSize, 4, 4, tx, ty);
new_covD[bx * PredictSize + ty * 4 + tx] = value;
__syncthreads();
}
void predict_single(float* predict, float* covD, float* new_predict, float* new_covD, int point_num, float delta_t){
//the first two steps of Kalman Filter
float* predictD, *new_predictD;
hipMalloc(&predictD, point_num* PredictSize* sizeof(float));
hipMalloc(&new_predictD, point_num* PredictSize* sizeof(float));
hipMemcpy(predictD, predict, point_num*PredictSize*sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(4, 4);
dim3 dimGrid(point_num, 1);
PredictKernel << <dimGrid, dimBlock >> >(predictD, covD, new_predictD, new_covD, point_num);
// After this step, data in PredictData is x', data in Covariance is P'
hipMemcpy(new_predict, new_predictD, point_num*PredictSize*sizeof(float), hipMemcpyDeviceToHost);
hipFree(predictD);
hipFree(new_predictD);
}
void update_single(float* data, float* predict, float* covD, float* new_predict, float* new_covD, int point_num, float delta_t, int ite_num){
//the remaining three steps of the Kalman Filter
float* predictD, *new_predictD, *dataD;
hipMalloc(&predictD, point_num* PredictSize* sizeof(float));
hipMalloc(&new_predictD, point_num* PredictSize* sizeof(float));
hipMalloc(&dataD, point_num * 2 * sizeof(float));
hipMemcpy(predictD, predict, point_num*PredictSize*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dataD, data, point_num * 2 * sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(4, 4);
dim3 dimGrid(point_num, 1);
UpdateKernel << <dimGrid, dimBlock >> >(dataD, predictD, covD, new_predictD, new_covD, point_num, ite_num);
// After this step, data in PredictData is x, data in Covariance is P
hipMemcpy(new_predict, new_predictD, point_num*PredictSize*sizeof(float), hipMemcpyDeviceToHost);
hipFree(predictD);
hipFree(new_predictD);
hipFree(dataD);
}
__device__ float ele_multi(float* A, float* B, int Awidth, int Bwidth, int tx, int ty){
//calculate one element of the product of two matrices
float Pvalue = 0;
for (int k = 0; k < Awidth; ++k){
float Melement = A[ty * Awidth + k];
float Nelement = B[k * Bwidth + tx];
Pvalue += Melement * Nelement;
}
return Pvalue;
} | 8e2f8da82d706b95c9890b822cd8b3a2d0e69439.cu | //************************************************************************************
//Kalman filter using CUDA
//
//Check https://en.wikipedia.org/wiki/Kalman_filter for more details about Kalman
//
//Created by Junkai Cheng, 2019/01/15
//***********************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <cstring>
#include <math.h>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "point.h"
#include "kalman_cuda.h"
__device__ float ele_multi(float* A, float* B, int Awidth, int Bwidth, int tx, int ty);
//some constant matrices to be used in this part
__device__ float H[8] = {
1.0, 0, 0, 0,
0, 1.0, 0, 0
};
__device__ float HT[8] = {
1.0, 0,
0, 1.0,
0, 0,
0, 0
};
__device__ float A[16] = {
1.0, 0, Time, 0,
0, 1.0, 0, Time,
0, 0, 1.0, 0,
0, 0, 0, 1.0
};
__device__ float AT[16] = {
1.0, 0, 0, 0,
0, 1.0, 0, 0,
Time, 0, 1.0, 0,
0, Time, 0, 1.0
};
__device__ float Q[16] = {
0, 0.01, 0, 0,
0.01, 0, 0, 0,
0, 0, 0.002, 0.01,
0, 0, 0.01, 0.001
};
__device__ float R[4] = {
0.01, 0.01,
0.01, 0.01
};
__device__ float I[16] = {
1.0, 0, 0, 0,
0, 1.0, 0, 0,
0, 0, 1.0, 0,
0, 0, 0, 1.0
};
__global__ void PredictKernel(float* predictD, float* covD, float* new_predictD, float* new_covD, int point_num){
//Kernel function for the first two steps of Kalman Filter
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
__shared__ float temp[CovSize];
float value;
//calculate x_k' = A * x_{k-1}
if (tx < 1){
value = ele_multi(A, predictD + bx * PredictSize, 4, 1, tx, ty);
new_predictD[bx * PredictSize + ty] = value;
}
//calculate P_k' = A * P_{k-1} * A^T + Q
value = ele_multi(A, covD + bx * CovSize, 4, 4, tx, ty);
temp[ty * 4 + tx] = value;
__syncthreads();
value = ele_multi(temp, AT, 4, 4, tx, ty);
if (bx < point_num)
new_covD[bx * CovSize + ty * 4 + tx] = value + Q[ty * 4 + tx];
__syncthreads();
}
__global__ void UpdateKernel(float* dataD, float* predictD, float* covD, float* new_predictD, float* new_covD, int point_num, int ite_num){
//kernel function for the remaining three steps of the Kalman Filter
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
float value;
__shared__ float temp[CovSize];
//calculate H*P
if (ty < 2){
value = ele_multi(H, covD + bx*CovSize, 4, 4, tx, ty);
temp[ty * 4 + tx] = value;
}
__syncthreads();
//calculate H*P_k*H^T + R
__shared__ float temp2[PredictSize];
if (ty < 2 && tx < 2){
value = ele_multi(temp, HT, 4, 2, tx, ty);
temp2[ty * 2 + tx] = value + R[ty * 2 + tx];
}
//calculate P_k* H^T
__shared__ float temp3[8];
if (tx < 2){
value = ele_multi(covD + bx*CovSize, HT, 4, 2, tx, ty);
temp3[ty * 2 + tx] = value;
}
__syncthreads();
//calculate K
__shared__ float K[8];
float det = temp2[0] * temp2[3] - temp2[2] * temp2[1];
__shared__ float temp2_inv[4];
temp2_inv[0] = 1.0f / det * temp2[3];
temp2_inv[1] = -1.0f / det * temp2[1];
temp2_inv[2] = -1.0f / det * temp2[2];
temp2_inv[3] = 1.0f / det * temp2[0];
if (tx < 2){
value = ele_multi(temp3, temp2_inv, 2, 2, tx, ty);
K[ty * 2 + tx] = value;
}
//calculate z_k - H*x_k'
__shared__ float temp4[8];
if (tx < 1 && ty < 2){
value = ele_multi(H, predictD + bx * PredictSize, 4, 1, tx, ty);
temp4[ty] = dataD[MeasureSize * bx + ty] - value;
}
__syncthreads();
//calculate x_k
if (tx < 1){
value = ele_multi(K, temp4, 2, 1, tx, ty);
new_predictD[bx * PredictSize + ty] = predictD[bx * PredictSize + ty] + value;
}
//calculate I-K*H
__shared__ float temp5[CovSize];
value = ele_multi(K, H, 2, 4, tx, ty);
temp5[ty * 4 + tx] = I[ty * 4 + tx] - value;
__syncthreads();
//calculate P_k
value = ele_multi(temp5, covD + bx*CovSize, 4, 4, tx, ty);
new_covD[bx * PredictSize + ty * 4 + tx] = value;
__syncthreads();
}
void predict_single(float* predict, float* covD, float* new_predict, float* new_covD, int point_num, float delta_t){
//the first two steps of Kalman Filter
float* predictD, *new_predictD;
cudaMalloc(&predictD, point_num* PredictSize* sizeof(float));
cudaMalloc(&new_predictD, point_num* PredictSize* sizeof(float));
cudaMemcpy(predictD, predict, point_num*PredictSize*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(4, 4);
dim3 dimGrid(point_num, 1);
PredictKernel << <dimGrid, dimBlock >> >(predictD, covD, new_predictD, new_covD, point_num);
// After this step, data in PredictData is x', data in Covariance is P'
cudaMemcpy(new_predict, new_predictD, point_num*PredictSize*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(predictD);
cudaFree(new_predictD);
}
void update_single(float* data, float* predict, float* covD, float* new_predict, float* new_covD, int point_num, float delta_t, int ite_num){
//the remaining three steps of the Kalman Filter
float* predictD, *new_predictD, *dataD;
cudaMalloc(&predictD, point_num* PredictSize* sizeof(float));
cudaMalloc(&new_predictD, point_num* PredictSize* sizeof(float));
cudaMalloc(&dataD, point_num * 2 * sizeof(float));
cudaMemcpy(predictD, predict, point_num*PredictSize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dataD, data, point_num * 2 * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(4, 4);
dim3 dimGrid(point_num, 1);
UpdateKernel << <dimGrid, dimBlock >> >(dataD, predictD, covD, new_predictD, new_covD, point_num, ite_num);
// After this step, data in PredictData is x, data in Covariance is P
cudaMemcpy(new_predict, new_predictD, point_num*PredictSize*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(predictD);
cudaFree(new_predictD);
cudaFree(dataD);
}
__device__ float ele_multi(float* A, float* B, int Awidth, int Bwidth, int tx, int ty){
//calculate one element of the product of two matrices
float Pvalue = 0;
for (int k = 0; k < Awidth; ++k){
float Melement = A[ty * Awidth + k];
float Nelement = B[k * Bwidth + tx];
Pvalue += Melement * Nelement;
}
return Pvalue;
} |
a82e70e67ad1a0445d458786a64e5b59742913fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on https://github.com/daijifeng001/caffe-rfcn/blob/r-fcn/src/caffe/layers/psroi_pooling_layer.cu
//
// ------------------------------------------------------------------
// R-FCN
// Copyright (c) 2016 Microsoft
// Licensed under The MIT License [see r-fcn/LICENSE for details]
// Written by Yi Li
// ------------------------------------------------------------------
//
// COPYRIGHT
//
// All contributions by the University of California:
// Copyright (c) 2014, 2015, The Regents of the University of California
// (Regents)
// All rights reserved.
//
// All other contributions:
// Copyright (c) 2014, 2015, the respective contributors
// All rights reserved.
//
// Caffe uses a shared copyright model: each contributor holds copyright over
// their contributions to Caffe. The project versioning records all such
// contribution and copyright details. If a contributor wants to further mark
// their specific copyright on a particular contribution, they should indicate
// their copyright solely in the commit message of the change when it is
// committed.
//
// LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// CONTRIBUTION AGREEMENT
//
// By contributing to the BVLC/caffe repository through pull-request, comment,
// or otherwise, the contributor releases their content to the
// license and copyright terms herein.
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "ps_roi_pool_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
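// Note added for clarity: the backward kernel accumulates gradients with this
// atomic add because bins from overlapping ROIs (and neighbouring bins of the same
// ROI, whose floor/ceil edges can share a row or column) may touch the same
// bottom_diff element, so plain stores would race.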
template <typename T>
__global__ void PSRoIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const int output_dim,
const int group_size,
T* top_data,
int* mapping_channel) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
round(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
round(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
round(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0),width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
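// Note added for clarity: this is the position-sensitive mapping of PS-ROI pooling.
// Each output channel ctop owns a group_size x group_size block of input channels,
// and the bin position (gh, gw) picks which channel of that block feeds this bin,
// so the input is expected to carry output_dim * group_size * group_size channels.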
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
out_sum += offset_bottom_data[bottom_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename T>
__global__ void PSRoIPoolBackward(
const int nthreads,
const T* top_diff,
const int* mapping_channel,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int output_dim,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
round(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
round(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
round(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph)* bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
} // namespace
template<>
bool PSRoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // PSRoI pooled data
auto* A = Output(1); // mapping_channel
Y->Resize(R.dim32(0), output_dim_, pooled_height_, pooled_width_);
A->Resize(Y->dims());
int output_size = Y->size();
hipLaunchKernelGGL(( PSRoIPoolForward<float>), dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2),
X.dim32(3), pooled_height_, pooled_width_, R.data<float>(), output_dim_,
group_size_, Y->mutable_data<float>(), A->mutable_data<int>());
return true;
}
template<>
bool PSRoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // mapping channels
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
hipLaunchKernelGGL(( PSRoIPoolBackward<float>), dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_,
X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_,
output_dim_, dX->mutable_data<float>(), R.data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(PSRoIPool,
PSRoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PSRoIPoolGradient,
PSRoIPoolGradientOp<float, CUDAContext>);
} // namespace caffe2
| a82e70e67ad1a0445d458786a64e5b59742913fd.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on https://github.com/daijifeng001/caffe-rfcn/blob/r-fcn/src/caffe/layers/psroi_pooling_layer.cu
//
// ------------------------------------------------------------------
// R-FCN
// Copyright (c) 2016 Microsoft
// Licensed under The MIT License [see r-fcn/LICENSE for details]
// Written by Yi Li
// ------------------------------------------------------------------
//
// COPYRIGHT
//
// All contributions by the University of California:
// Copyright (c) 2014, 2015, The Regents of the University of California
// (Regents)
// All rights reserved.
//
// All other contributions:
// Copyright (c) 2014, 2015, the respective contributors
// All rights reserved.
//
// Caffe uses a shared copyright model: each contributor holds copyright over
// their contributions to Caffe. The project versioning records all such
// contribution and copyright details. If a contributor wants to further mark
// their specific copyright on a particular contribution, they should indicate
// their copyright solely in the commit message of the change when it is
// committed.
//
// LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// CONTRIBUTION AGREEMENT
//
// By contributing to the BVLC/caffe repository through pull-request, comment,
// or otherwise, the contributor releases their content to the
// license and copyright terms herein.
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "ps_roi_pool_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void PSRoIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const int output_dim,
const int group_size,
T* top_data,
int* mapping_channel) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
round(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
round(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
round(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
out_sum += offset_bottom_data[bottom_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename T>
__global__ void PSRoIPoolBackward(
const int nthreads,
const T* top_diff,
const int* mapping_channel,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int output_dim,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
round(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
round(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
round(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph)* bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
} // namespace
template<>
bool PSRoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // PSRoI pooled data
auto* A = Output(1); // mapping_channel
Y->Resize(R.dim32(0), output_dim_, pooled_height_, pooled_width_);
A->Resize(Y->dims());
int output_size = Y->size();
PSRoIPoolForward<float><<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2),
X.dim32(3), pooled_height_, pooled_width_, R.data<float>(), output_dim_,
group_size_, Y->mutable_data<float>(), A->mutable_data<int>());
return true;
}
template<>
bool PSRoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // mapping channels
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
PSRoIPoolBackward<float><<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_,
X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_,
output_dim_, dX->mutable_data<float>(), R.data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(PSRoIPool,
PSRoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PSRoIPoolGradient,
PSRoIPoolGradientOp<float, CUDAContext>);
} // namespace caffe2
|
dcdd7b9525576a0f4c92142b488d3631b1c0dd41.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <vector>
#include <sstream>
#include <cmath>
#include <mpi.h> //activate mpi
#include "dg/algorithm.h"
#include "dg/backend/timer.cuh"
#include "dg/backend/xspacelib.cuh"
#include "dg/backend/interpolation.cuh"
#include "netcdf_par.h"
#include "file/read_input.h"
#include "file/nc_utilities.h"
#include "feltor.cuh"
/*
- the only difference to the feltor_hpc.cu file is that this program
uses the MPI backend and
the parallel netcdf output
- pay attention that both the grid dimensions as well as the
output dimensions must be divisible by the mpi process numbers
*/
typedef dg::MPI_FieldAligned< dg::CylindricalMPIGrid<dg::MDVec>, dg::IDMatrix,dg::BijectiveComm< dg::iDVec, dg::DVec >, dg::DVec> DFA;
int main( int argc, char* argv[])
{
////////////////////////////////setup MPI///////////////////////////////
int provided;
MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided);
if( provided != MPI_THREAD_FUNNELED)
{
std::cerr << "wrong mpi-thread environment provided!\n";
return -1;
}
int periods[3] = {false, false, true}; //non-, non-, periodic
int rank, size;
MPI_Comm_rank( MPI_COMM_WORLD, &rank);
MPI_Comm_size( MPI_COMM_WORLD, &size);
#if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA
int num_devices=0;
hipGetDeviceCount(&num_devices);
if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;}
int device = rank % num_devices; //assume # of gpus/node is fixed
hipSetDevice( device);
#endif//cuda
int np[3];
if(rank==0)
{
std::cin>> np[0] >> np[1] >>np[2];
std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" x "<<np[2] << " = "<<size<<std::endl;
assert( size == np[0]*np[1]*np[2]);
}
MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm comm;
MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm);
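// comm is a 3d Cartesian communicator with np[0] x np[1] x np[2] processes,
// periodic only in the third (toroidal) direction as set in periods[]; the
// last argument (reorder = true) allows MPI to permute ranks for placement.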
////////////////////////Parameter initialisation//////////////////////////
std::vector<double> v,v3;
std::string input, geom;
if( argc != 4)
{
if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [geomfile] [outputfile]\n";
return -1;
}
else
{
try{
input = file::read_file( argv[1]);
geom = file::read_file( argv[2]);
v = file::read_input( argv[1]);
v3 = file::read_input( argv[2]);
}catch( toefl::Message& m){
if(rank==0)m.display();
if(rank==0) std::cout << input << std::endl;
if(rank==0) std::cout << geom << std::endl;
return -1;
}
}
const eule::Parameters p( v);
if(rank==0) p.display( std::cout);
const solovev::GeomParameters gp(v3);
if(rank==0) gp.display( std::cout);
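////////////illustrative sanity check (not in the original source)////////////
//The header comment requires both the grid and the output dimensions to be
//divisible by the mpi process numbers; assuming the usual Nx<->np[0],
//Ny<->np[1], Nz<->np[2] mapping, this can be enforced before the grids are built:
if( p.Nx%np[0]!=0 || p.Ny%np[1]!=0 || p.Nz%np[2]!=0 ||
    p.Nx_out%np[0]!=0 || p.Ny_out%np[1]!=0 || p.Nz_out%np[2]!=0)
{
    if(rank==0)std::cerr << "ERROR: grid and output dimensions must be divisible by the process numbers!\n";
    MPI_Finalize();
    return -1;
}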
////////////////////////////////set up computations///////////////////////////
double Rmin=gp.R_0-p.boxscaleRm*gp.a;
double Zmin=-p.boxscaleZm*gp.a*gp.elongation;
double Rmax=gp.R_0+p.boxscaleRp*gp.a;
double Zmax=p.boxscaleZp*gp.a*gp.elongation;
//Make grids
dg::CylindricalMPIGrid<dg::MDVec> grid( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n, p.Nx, p.Ny, p.Nz, p.bc, p.bc, dg::PER, comm);
dg::CylindricalMPIGrid<dg::MDVec> grid_out( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n_out, p.Nx_out, p.Ny_out, p.Nz_out, p.bc, p.bc, dg::PER, comm);
//create RHS
if(rank==0)std::cout << "Constructing Feltor...\n";
eule::Feltor<dg::CylindricalMPIGrid<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec> feltor( grid, p, gp); //initialize before rolkar!
if(rank==0)std::cout << "Constructing Rolkar...\n";
eule::Rolkar< dg::CylindricalMPIGrid<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec > rolkar( grid, p, gp, feltor.ds(), feltor.dsDIR());
if(rank==0)std::cout << "Done!\n";
/////////////////////The initial field/////////////////////////////////////////
//background profile
solovev::Nprofile prof(p.bgprofamp, p.nprofileamp, gp); //initial background profile
std::vector<dg::MDVec> y0(4, dg::evaluate( prof, grid)), y1(y0);
//perturbation
dg::GaussianZ gaussianZ( 0., p.sigma_z*M_PI, 1); //modulation along fieldline
if( p.mode == 0 || p.mode == 1)
{
dg::Gaussian init0( gp.R_0+p.posX*gp.a, p.posY*gp.a, p.sigma, p.sigma, p.amp);
if( p.mode == 0)
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 3); //rounds =3 ->2*3-1
if( p.mode == 1)
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); //rounds =1 ->2*1-1
}
if( p.mode == 2)
{
dg::BathRZ init0(16,16,p.Nz,Rmin,Zmin, 30.,5.,p.amp);
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1);
}
if( p.mode == 3)
{
solovev::ZonalFlow init0(p.amp, p.k_psi, gp);
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1);
}
dg::blas1::axpby( 1., y1[1], 1., y0[1]); //sum up background and perturbation
dg::blas1::plus(y0[1], -1); //initialize ni-1
if( p.mode == 2 || p.mode == 3)
{
dg::MDVec damping = dg::evaluate( solovev::GaussianProfXDamping( gp), grid);
dg::blas1::pointwiseDot(damping, y0[1], y0[1]); //damp with gaussprofdamp
}
feltor.initializene( y0[1], y0[0]);
dg::blas1::axpby( 0., y0[2], 0., y0[2]); //set Ue = 0
dg::blas1::axpby( 0., y0[3], 0., y0[3]); //set Ui = 0
dg::Karniadakis< std::vector<dg::MDVec> > karniadakis( y0, y0[0].size(), p.eps_time);
karniadakis.init( feltor, rolkar, y0, p.dt);
/////////////////////////////set up netcdf/////////////////////////////////
file::NC_Error_Handle err;
int ncid;
MPI_Info info = MPI_INFO_NULL;
err = nc_create_par( argv[3], NC_NETCDF4|NC_MPIIO|NC_CLOBBER, comm, info, &ncid); //MPI ON
// err = nc_create( argv[3],NC_NETCDF4|NC_CLOBBER, &ncid);//MPI OFF
err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data());
err = nc_put_att_text( ncid, NC_GLOBAL, "geomfile", geom.size(), geom.data());
int dimids[4], tvarID;
{
err = file::define_dimensions( ncid, dimids, &tvarID, grid_out.global());
solovev::FieldR fieldR(gp);
solovev::FieldZ fieldZ(gp);
solovev::FieldP fieldP(gp);
dg::HVec vecR = dg::evaluate( fieldR, grid_out.global());
dg::HVec vecZ = dg::evaluate( fieldZ, grid_out.global());
dg::HVec vecP = dg::evaluate( fieldP, grid_out.global());
int vecID[3];
err = nc_def_var( ncid, "BR", NC_DOUBLE, 3, &dimids[1], &vecID[0]);
err = nc_def_var( ncid, "BZ", NC_DOUBLE, 3, &dimids[1], &vecID[1]);
err = nc_def_var( ncid, "BP", NC_DOUBLE, 3, &dimids[1], &vecID[2]);
err = nc_enddef( ncid);
err = nc_put_var_double( ncid, vecID[0], vecR.data());
err = nc_put_var_double( ncid, vecID[1], vecZ.data());
err = nc_put_var_double( ncid, vecID[2], vecP.data());
err = nc_redef(ncid);
}
//field IDs
std::string names[5] = {"electrons", "ions", "Ue", "Ui", "potential"};
int dataIDs[5]; //VARIABLE IDS
for( unsigned i=0; i<5; i++)
err = nc_def_var( ncid, names[i].data(), NC_DOUBLE, 4, dimids, &dataIDs[i]);
//energy IDs
int EtimeID, EtimevarID;
err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID);
int energyID, massID, energyIDs[5], dissID, alignedID, dEdtID, accuracyID;
err = nc_def_var( ncid, "energy", NC_DOUBLE, 1, &EtimeID, &energyID);
err = nc_def_var( ncid, "mass", NC_DOUBLE, 1, &EtimeID, &massID);
std::string energies[5] = {"Se", "Si", "Uperp", "Upare", "Upari"};
for( unsigned i=0; i<5; i++)
err = nc_def_var( ncid, energies[i].data(), NC_DOUBLE, 1, &EtimeID, &energyIDs[i]);
err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID);
err = nc_def_var( ncid, "alignment", NC_DOUBLE, 1, &EtimeID, &alignedID);
err = nc_def_var( ncid, "dEdt", NC_DOUBLE, 1, &EtimeID, &dEdtID);
err = nc_def_var( ncid, "accuracy", NC_DOUBLE, 1, &EtimeID, &accuracyID);
//probe vars definition
int NepID,phipID;
err = nc_def_var( ncid, "Ne_p", NC_DOUBLE, 1, &EtimeID, &NepID);
err = nc_def_var( ncid, "phi_p", NC_DOUBLE, 1, &EtimeID, &phipID);
for(unsigned i=0; i<5; i++)
{
err = nc_var_par_access( ncid, energyIDs[i], NC_COLLECTIVE);
err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE);
}
err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, massID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, alignedID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, accuracyID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, NepID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, phipID, NC_COLLECTIVE);
err = nc_enddef(ncid);
///////////////////////////////////PROBE//////////////////////////////
const dg::HVec Xprobe(1,gp.R_0+p.boxscaleRp*gp.a);
const dg::HVec Zprobe(1,0.);
const dg::HVec Phiprobe(1,M_PI);
dg::IDMatrix probeinterp;
int probeRANK = grid.pidOf( Xprobe[0], Zprobe[0], Phiprobe[0]);
if(rank==probeRANK)
probeinterp=dg::create::interpolation( Xprobe,Zprobe,Phiprobe,grid.local(), dg::NEU);
dg::DVec probevalue(1,0.);
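// Only the rank that owns the probe location (probeRANK) builds the
// interpolation matrix; the probed Ne and phi values are later broadcast
// from that rank to all others with MPI_Bcast before being written.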
///////////////////////////first output/////////////////////////////////
if(rank==0)std::cout << "First output ... \n";
int dims[3], coords[3];
MPI_Cart_get( comm, 3, dims, periods, coords);
size_t count[4] = {1, grid_out.Nz(), grid_out.n()*(grid_out.Ny()), grid_out.n()*(grid_out.Nx())};
size_t start[4] = {0, coords[2]*count[1], coords[1]*count[2], coords[0]*count[3]};
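// Parallel netcdf hyperslab: each rank writes only its local block, with the
// offsets in start[] derived from its Cartesian coordinates (coords[2]->z,
// coords[1]->y, coords[0]->x); start[0] is the time index and is advanced
// once per output step in the loop below.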
dg::MDVec transfer( dg::evaluate(dg::zero, grid));
dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local()));
dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local()));
dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix
for( unsigned i=0; i<4; i++)
{
dg::blas2::gemv( interpolate, y0[i].data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() );
}
transfer = feltor.potential()[0];
dg::blas2::gemv( interpolate, transfer.data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data());
double time = 0;
err = nc_put_vara_double( ncid, tvarID, start, count, &time);
err = nc_put_vara_double( ncid, EtimevarID, start, count, &time);
size_t Estart[] = {0};
size_t Ecount[] = {1};
double energy0 = feltor.energy(), mass0 = feltor.mass(), E0 = energy0, mass = mass0, E1 = 0.0, dEdt = 0., diss = 0., aligned=0, accuracy=0.;
std::vector<double> evec = feltor.energy_vector();
err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &energy0);
err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass0);
for( unsigned i=0; i<5; i++)
err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]);
err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss);
err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned);
err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt);
err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy);
//probe
double Nep=0, phip=0;
if(rank==probeRANK) {
dg::blas2::gemv(probeinterp,y0[0].data(),probevalue);
Nep=probevalue[0] ;
dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue);
phip=probevalue[0] ;
}
MPI_Bcast( &Nep,1 , MPI_DOUBLE, probeRANK, grid.communicator());
MPI_Bcast( &phip,1 , MPI_DOUBLE, probeRANK, grid.communicator());
err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep);
err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip);
if(rank==0)std::cout << "First write successful!\n";
///////////////////////////////////////Timeloop/////////////////////////////////
dg::Timer t;
t.tic();
#ifdef DG_BENCHMARK
unsigned step = 0;
#endif //DG_BENCHMARK
for( unsigned i=1; i<=p.maxout; i++)
{
#ifdef DG_BENCHMARK
dg::Timer ti;
ti.tic();
#endif//DG_BENCHMARK
for( unsigned j=0; j<p.itstp; j++)
{
try{ karniadakis( feltor, rolkar, y0);}
catch( dg::Fail& fail) {
if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n";
if(rank==0)std::cerr << "Does Simulation respect CFL condition?"<<std::endl;
err = nc_close(ncid);
MPI_Finalize();
return -1;
}
step++;
time+=p.dt;
Estart[0] = step;
E1 = feltor.energy(), mass = feltor.mass(), diss = feltor.energy_diffusion();
dEdt = (E1 - E0)/p.dt;
E0 = E1;
accuracy = 2.*fabs( (dEdt-diss)/(dEdt + diss));
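// accuracy is the relative mismatch of the energy theorem dE/dt = Lambda,
// comparing the finite-difference rate (E1-E0)/dt with the dissipation Lambda
// from energy_diffusion(); values close to zero indicate a consistent step.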
evec = feltor.energy_vector();
err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time);
err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &E1);
err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass);
for( unsigned i=0; i<5; i++)
err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]);
err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss);
err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned);
err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt);
err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy);
if(rank==probeRANK)
{
dg::blas2::gemv(probeinterp,y0[0].data(),probevalue);
Nep= probevalue[0] ;
dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue);
phip=probevalue[0] ;
}
MPI_Bcast( &Nep, 1 ,MPI_DOUBLE, probeRANK, grid.communicator());
MPI_Bcast( &phip,1 ,MPI_DOUBLE, probeRANK, grid.communicator());
err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep);
err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip);
if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (feltor.mass()-mass0)/mass0<<"\t";
if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t";
if(rank==0)std::cout <<" d E/dt = " << dEdt <<" Lambda = " << diss << " -> Accuracy: "<< accuracy << "\n";
}
#ifdef DG_BENCHMARK
ti.toc();
if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time;
if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s";
ti.tic();
#endif//DG_BENCHMARK
//err = nc_open_par( argv[3], NC_WRITE|NC_MPIIO, comm, info, &ncid); //don't do it
//////////////////////////write fields////////////////////////
start[0] = i;
for( unsigned j=0; j<4; j++)
{
dg::blas2::gemv( interpolate, y0[j].data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data());
}
transfer = feltor.potential()[0];
dg::blas2::gemv( interpolate, transfer.data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() );
err = nc_put_vara_double( ncid, tvarID, start, count, &time);
//err = nc_close(ncid); DON'T DO IT!
#ifdef DG_BENCHMARK
ti.toc();
if(rank==0)std::cout << "\n\t Time for output: "<<ti.diff()<<"s\n\n"<<std::flush;
#endif//DG_BENCHMARK
}
t.toc();
unsigned hour = (unsigned)floor(t.diff()/3600);
unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60);
double second = t.diff() - hour*3600 - minute*60;
if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0');
if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n";
if(rank==0)std::cout <<"which is \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n";
err = nc_close(ncid);
MPI_Finalize();
return 0;
}
| dcdd7b9525576a0f4c92142b488d3631b1c0dd41.cu | #include <iostream>
#include <iomanip>
#include <vector>
#include <sstream>
#include <cmath>
#include <mpi.h> //activate mpi
#include "dg/algorithm.h"
#include "dg/backend/timer.cuh"
#include "dg/backend/xspacelib.cuh"
#include "dg/backend/interpolation.cuh"
#include "netcdf_par.h"
#include "file/read_input.h"
#include "file/nc_utilities.h"
#include "feltor.cuh"
/*
- the only difference to the feltor_hpc.cu file is that this program
uses the MPI backend and
the parallel netcdf output
- pay attention that both the grid dimensions as well as the
output dimensions must be divisible by the mpi process numbers
*/
typedef dg::MPI_FieldAligned< dg::CylindricalMPIGrid<dg::MDVec>, dg::IDMatrix,dg::BijectiveComm< dg::iDVec, dg::DVec >, dg::DVec> DFA;
int main( int argc, char* argv[])
{
////////////////////////////////setup MPI///////////////////////////////
int provided;
MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided);
if( provided != MPI_THREAD_FUNNELED)
{
std::cerr << "wrong mpi-thread environment provided!\n";
return -1;
}
int periods[3] = {false, false, true}; //non-, non-, periodic
int rank, size;
MPI_Comm_rank( MPI_COMM_WORLD, &rank);
MPI_Comm_size( MPI_COMM_WORLD, &size);
#if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA
int num_devices=0;
cudaGetDeviceCount(&num_devices);
if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;}
int device = rank % num_devices; //assume # of gpus/node is fixed
cudaSetDevice( device);
#endif//cuda
int np[3];
if(rank==0)
{
std::cin>> np[0] >> np[1] >>np[2];
std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" x "<<np[2] << " = "<<size<<std::endl;
assert( size == np[0]*np[1]*np[2]);
}
MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm comm;
MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm);
////////////////////////Parameter initialisation//////////////////////////
std::vector<double> v,v3;
std::string input, geom;
if( argc != 4)
{
if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [geomfile] [outputfile]\n";
return -1;
}
else
{
try{
input = file::read_file( argv[1]);
geom = file::read_file( argv[2]);
v = file::read_input( argv[1]);
v3 = file::read_input( argv[2]);
}catch( toefl::Message& m){
if(rank==0)m.display();
if(rank==0) std::cout << input << std::endl;
if(rank==0) std::cout << geom << std::endl;
return -1;
}
}
const eule::Parameters p( v);
if(rank==0) p.display( std::cout);
const solovev::GeomParameters gp(v3);
if(rank==0) gp.display( std::cout);
////////////////////////////////set up computations///////////////////////////
double Rmin=gp.R_0-p.boxscaleRm*gp.a;
double Zmin=-p.boxscaleZm*gp.a*gp.elongation;
double Rmax=gp.R_0+p.boxscaleRp*gp.a;
double Zmax=p.boxscaleZp*gp.a*gp.elongation;
//Make grids
dg::CylindricalMPIGrid<dg::MDVec> grid( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n, p.Nx, p.Ny, p.Nz, p.bc, p.bc, dg::PER, comm);
dg::CylindricalMPIGrid<dg::MDVec> grid_out( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n_out, p.Nx_out, p.Ny_out, p.Nz_out, p.bc, p.bc, dg::PER, comm);
//create RHS
if(rank==0)std::cout << "Constructing Feltor...\n";
eule::Feltor<dg::CylindricalMPIGrid<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec> feltor( grid, p, gp); //initialize before rolkar!
if(rank==0)std::cout << "Constructing Rolkar...\n";
eule::Rolkar< dg::CylindricalMPIGrid<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec > rolkar( grid, p, gp, feltor.ds(), feltor.dsDIR());
if(rank==0)std::cout << "Done!\n";
/////////////////////The initial field/////////////////////////////////////////
//background profile
solovev::Nprofile prof(p.bgprofamp, p.nprofileamp, gp); //initial background profile
std::vector<dg::MDVec> y0(4, dg::evaluate( prof, grid)), y1(y0);
//perturbation
dg::GaussianZ gaussianZ( 0., p.sigma_z*M_PI, 1); //modulation along fieldline
if( p.mode == 0 || p.mode == 1)
{
dg::Gaussian init0( gp.R_0+p.posX*gp.a, p.posY*gp.a, p.sigma, p.sigma, p.amp);
if( p.mode == 0)
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 3); //rounds =3 ->2*3-1
if( p.mode == 1)
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); //rounds =1 ->2*1-1
}
if( p.mode == 2)
{
dg::BathRZ init0(16,16,p.Nz,Rmin,Zmin, 30.,5.,p.amp);
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1);
}
if( p.mode == 3)
{
solovev::ZonalFlow init0(p.amp, p.k_psi, gp);
y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1);
}
dg::blas1::axpby( 1., y1[1], 1., y0[1]); //sum up background and perturbation
dg::blas1::plus(y0[1], -1); //initialize ni-1
if( p.mode == 2 || p.mode == 3)
{
dg::MDVec damping = dg::evaluate( solovev::GaussianProfXDamping( gp), grid);
dg::blas1::pointwiseDot(damping, y0[1], y0[1]); //damp with gaussprofdamp
}
feltor.initializene( y0[1], y0[0]);
dg::blas1::axpby( 0., y0[2], 0., y0[2]); //set Ue = 0
dg::blas1::axpby( 0., y0[3], 0., y0[3]); //set Ui = 0
dg::Karniadakis< std::vector<dg::MDVec> > karniadakis( y0, y0[0].size(), p.eps_time);
karniadakis.init( feltor, rolkar, y0, p.dt);
/////////////////////////////set up netcdf/////////////////////////////////
file::NC_Error_Handle err;
int ncid;
MPI_Info info = MPI_INFO_NULL;
err = nc_create_par( argv[3], NC_NETCDF4|NC_MPIIO|NC_CLOBBER, comm, info, &ncid); //MPI ON
// err = nc_create( argv[3],NC_NETCDF4|NC_CLOBBER, &ncid);//MPI OFF
err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data());
err = nc_put_att_text( ncid, NC_GLOBAL, "geomfile", geom.size(), geom.data());
int dimids[4], tvarID;
{
err = file::define_dimensions( ncid, dimids, &tvarID, grid_out.global());
solovev::FieldR fieldR(gp);
solovev::FieldZ fieldZ(gp);
solovev::FieldP fieldP(gp);
dg::HVec vecR = dg::evaluate( fieldR, grid_out.global());
dg::HVec vecZ = dg::evaluate( fieldZ, grid_out.global());
dg::HVec vecP = dg::evaluate( fieldP, grid_out.global());
int vecID[3];
err = nc_def_var( ncid, "BR", NC_DOUBLE, 3, &dimids[1], &vecID[0]);
err = nc_def_var( ncid, "BZ", NC_DOUBLE, 3, &dimids[1], &vecID[1]);
err = nc_def_var( ncid, "BP", NC_DOUBLE, 3, &dimids[1], &vecID[2]);
err = nc_enddef( ncid);
err = nc_put_var_double( ncid, vecID[0], vecR.data());
err = nc_put_var_double( ncid, vecID[1], vecZ.data());
err = nc_put_var_double( ncid, vecID[2], vecP.data());
err = nc_redef(ncid);
}
//field IDs
std::string names[5] = {"electrons", "ions", "Ue", "Ui", "potential"};
int dataIDs[5]; //VARIABLE IDS
for( unsigned i=0; i<5; i++)
err = nc_def_var( ncid, names[i].data(), NC_DOUBLE, 4, dimids, &dataIDs[i]);
//energy IDs
int EtimeID, EtimevarID;
err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID);
int energyID, massID, energyIDs[5], dissID, alignedID, dEdtID, accuracyID;
err = nc_def_var( ncid, "energy", NC_DOUBLE, 1, &EtimeID, &energyID);
err = nc_def_var( ncid, "mass", NC_DOUBLE, 1, &EtimeID, &massID);
std::string energies[5] = {"Se", "Si", "Uperp", "Upare", "Upari"};
for( unsigned i=0; i<5; i++)
err = nc_def_var( ncid, energies[i].data(), NC_DOUBLE, 1, &EtimeID, &energyIDs[i]);
err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID);
err = nc_def_var( ncid, "alignment", NC_DOUBLE, 1, &EtimeID, &alignedID);
err = nc_def_var( ncid, "dEdt", NC_DOUBLE, 1, &EtimeID, &dEdtID);
err = nc_def_var( ncid, "accuracy", NC_DOUBLE, 1, &EtimeID, &accuracyID);
//probe vars definition
int NepID,phipID;
err = nc_def_var( ncid, "Ne_p", NC_DOUBLE, 1, &EtimeID, &NepID);
err = nc_def_var( ncid, "phi_p", NC_DOUBLE, 1, &EtimeID, &phipID);
for(unsigned i=0; i<5; i++)
{
err = nc_var_par_access( ncid, energyIDs[i], NC_COLLECTIVE);
err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE);
}
err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, massID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, alignedID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, accuracyID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, NepID, NC_COLLECTIVE);
err = nc_var_par_access( ncid, phipID, NC_COLLECTIVE);
err = nc_enddef(ncid);
///////////////////////////////////PROBE//////////////////////////////
const dg::HVec Xprobe(1,gp.R_0+p.boxscaleRp*gp.a);
const dg::HVec Zprobe(1,0.);
const dg::HVec Phiprobe(1,M_PI);
dg::IDMatrix probeinterp;
int probeRANK = grid.pidOf( Xprobe[0], Zprobe[0], Phiprobe[0]);
if(rank==probeRANK)
probeinterp=dg::create::interpolation( Xprobe,Zprobe,Phiprobe,grid.local(), dg::NEU);
dg::DVec probevalue(1,0.);
///////////////////////////first output/////////////////////////////////
if(rank==0)std::cout << "First output ... \n";
int dims[3], coords[3];
MPI_Cart_get( comm, 3, dims, periods, coords);
size_t count[4] = {1, grid_out.Nz(), grid_out.n()*(grid_out.Ny()), grid_out.n()*(grid_out.Nx())};
size_t start[4] = {0, coords[2]*count[1], coords[1]*count[2], coords[0]*count[3]};
dg::MDVec transfer( dg::evaluate(dg::zero, grid));
dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local()));
dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local()));
dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix
for( unsigned i=0; i<4; i++)
{
dg::blas2::gemv( interpolate, y0[i].data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() );
}
transfer = feltor.potential()[0];
dg::blas2::gemv( interpolate, transfer.data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data());
double time = 0;
err = nc_put_vara_double( ncid, tvarID, start, count, &time);
err = nc_put_vara_double( ncid, EtimevarID, start, count, &time);
size_t Estart[] = {0};
size_t Ecount[] = {1};
double energy0 = feltor.energy(), mass0 = feltor.mass(), E0 = energy0, mass = mass0, E1 = 0.0, dEdt = 0., diss = 0., aligned=0, accuracy=0.;
std::vector<double> evec = feltor.energy_vector();
err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &energy0);
err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass0);
for( unsigned i=0; i<5; i++)
err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]);
err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss);
err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned);
err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt);
err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy);
//probe
double Nep=0, phip=0;
if(rank==probeRANK) {
dg::blas2::gemv(probeinterp,y0[0].data(),probevalue);
Nep=probevalue[0] ;
dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue);
phip=probevalue[0] ;
}
MPI_Bcast( &Nep,1 , MPI_DOUBLE, probeRANK, grid.communicator());
MPI_Bcast( &phip,1 , MPI_DOUBLE, probeRANK, grid.communicator());
err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep);
err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip);
if(rank==0)std::cout << "First write successful!\n";
///////////////////////////////////////Timeloop/////////////////////////////////
dg::Timer t;
t.tic();
#ifdef DG_BENCHMARK
unsigned step = 0;
#endif //DG_BENCHMARK
for( unsigned i=1; i<=p.maxout; i++)
{
#ifdef DG_BENCHMARK
dg::Timer ti;
ti.tic();
#endif//DG_BENCHMARK
for( unsigned j=0; j<p.itstp; j++)
{
try{ karniadakis( feltor, rolkar, y0);}
catch( dg::Fail& fail) {
if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n";
if(rank==0)std::cerr << "Does Simulation respect CFL condition?"<<std::endl;
err = nc_close(ncid);
MPI_Finalize();
return -1;
}
step++;
time+=p.dt;
Estart[0] = step;
E1 = feltor.energy(), mass = feltor.mass(), diss = feltor.energy_diffusion();
dEdt = (E1 - E0)/p.dt;
E0 = E1;
accuracy = 2.*fabs( (dEdt-diss)/(dEdt + diss));
evec = feltor.energy_vector();
err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time);
err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &E1);
err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass);
for( unsigned i=0; i<5; i++)
err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]);
err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss);
err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned);
err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt);
err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy);
if(rank==probeRANK)
{
dg::blas2::gemv(probeinterp,y0[0].data(),probevalue);
Nep= probevalue[0] ;
dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue);
phip=probevalue[0] ;
}
MPI_Bcast( &Nep, 1 ,MPI_DOUBLE, probeRANK, grid.communicator());
MPI_Bcast( &phip,1 ,MPI_DOUBLE, probeRANK, grid.communicator());
err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep);
err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip);
if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (feltor.mass()-mass0)/mass0<<"\t";
if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t";
if(rank==0)std::cout <<" d E/dt = " << dEdt <<" Lambda = " << diss << " -> Accuracy: "<< accuracy << "\n";
}
#ifdef DG_BENCHMARK
ti.toc();
if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time;
if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s";
ti.tic();
#endif//DG_BENCHMARK
//err = nc_open_par( argv[3], NC_WRITE|NC_MPIIO, comm, info, &ncid); //don't do it
//////////////////////////write fields////////////////////////
start[0] = i;
for( unsigned j=0; j<4; j++)
{
dg::blas2::gemv( interpolate, y0[j].data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data());
}
transfer = feltor.potential()[0];
dg::blas2::gemv( interpolate, transfer.data(), transferD);
dg::blas1::transfer( transferD, transferH);
err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() );
err = nc_put_vara_double( ncid, tvarID, start, count, &time);
//err = nc_close(ncid); DON'T DO IT!
#ifdef DG_BENCHMARK
ti.toc();
if(rank==0)std::cout << "\n\t Time for output: "<<ti.diff()<<"s\n\n"<<std::flush;
#endif//DG_BENCHMARK
}
t.toc();
unsigned hour = (unsigned)floor(t.diff()/3600);
unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60);
double second = t.diff() - hour*3600 - minute*60;
if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0');
if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n";
if(rank==0)std::cout <<"which is \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n";
err = nc_close(ncid);
MPI_Finalize();
return 0;
}
|
5f7f1c5de376ad06656f8e3c598a882f4e827300.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2013 Yangqing Jia
#include <iostream> // NOLINT(readability/streams)
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void PaddingForward(const int count, const Dtype* in, Dtype* out,
const int num, const int channel, const int height_in, const int width_in,
const int pad_h_top, const int pad_w_left, const int height_out, const int width_out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < count) {
int w = index % width_in;
index /= width_in;
int h = index % height_in;
index /= height_in;
int c = index % channel;
index /= channel;
out[((index * channel + c) * height_out + h + pad_h_top) * width_out + pad_w_left + w] =
in[((index * channel + c) * height_in + h) * width_in + w];
}
}
template <typename Dtype>
void PaddingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// First, set all data to be zero for the boundary pixels
CUDA_CHECK(hipMemset(top_data, 0, sizeof(Dtype) * top[0]->count()));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PaddingForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, num_, channel_, height_in_, width_in_,
pad_h_top_, pad_w_left_, height_out_, width_out_);
CUDA_POST_KERNEL_CHECK;
}
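// Worked example: with pad_h_top = 1 and pad_w_left = 2 the kernel copies
// input element (n, c, h, w) to output (n, c, h + 1, w + 2); the hipMemset
// above has already zeroed the border pixels that no thread writes.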
template <typename Dtype>
__global__ void PaddingBackward(const int count, const Dtype* in, Dtype* out,
const int num, const int channel, const int height_in, const int width_in,
const int pad_h_top, const int pad_w_left, const int height_out, const int width_out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < count) {
int w = index % width_in;
index /= width_in;
int h = index % height_in;
index /= height_in;
int c = index % channel;
index /= channel;
out[((index * channel + c) * height_in + h) * width_in + w] =
in[((index * channel + c) * height_out + h + pad_h_top) *
width_out + pad_w_left + w];
}
}
template <typename Dtype>
void PaddingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PaddingBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_diff, num_, channel_, height_in_, width_in_,
pad_h_top_, pad_w_left_, height_out_, width_out_);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PaddingLayer);
} // namespace caffe
| 5f7f1c5de376ad06656f8e3c598a882f4e827300.cu | // Copyright 2013 Yangqing Jia
#include <iostream> // NOLINT(readability/streams)
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void PaddingForward(const int count, const Dtype* in, Dtype* out,
const int num, const int channel, const int height_in, const int width_in,
const int pad_h_top, const int pad_w_left, const int height_out, const int width_out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < count) {
int w = index % width_in;
index /= width_in;
int h = index % height_in;
index /= height_in;
int c = index % channel;
index /= channel;
out[((index * channel + c) * height_out + h + pad_h_top) * width_out + pad_w_left + w] =
in[((index * channel + c) * height_in + h) * width_in + w];
}
}
template <typename Dtype>
void PaddingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// First, set all data to be zero for the boundary pixels
CUDA_CHECK(cudaMemset(top_data, 0, sizeof(Dtype) * top[0]->count()));
// NOLINT_NEXT_LINE(whitespace/operators)
PaddingForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, num_, channel_, height_in_, width_in_,
pad_h_top_, pad_w_left_, height_out_, width_out_);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PaddingBackward(const int count, const Dtype* in, Dtype* out,
const int num, const int channel, const int height_in, const int width_in,
const int pad_h_top, const int pad_w_left, const int height_out, const int width_out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < count) {
int w = index % width_in;
index /= width_in;
int h = index % height_in;
index /= height_in;
int c = index % channel;
index /= channel;
out[((index * channel + c) * height_in + h) * width_in + w] =
in[((index * channel + c) * height_out + h + pad_h_top) *
width_out + pad_w_left + w];
}
}
template <typename Dtype>
void PaddingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
PaddingBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_diff, num_, channel_, height_in_, width_in_,
pad_h_top_, pad_w_left_, height_out_, width_out_);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PaddingLayer);
} // namespace caffe
|
90582267b1c17b5ebafe4d8aa893813ba021912f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <cstdio>
#define THREAD_PER_BLOCK 256
#define BLOCKS_PER_SM 4
//with THREAD_PER_BLOCK=256 and BLOCKS_PER_SM=4 each thread can use at most 64 registers (64K-register SM)
__device__ __forceinline__ float pow_fun(float x, float p) {
return __powf(x, p);
}
template <int CI_div_G, int CO_div_G>
__global__ void __launch_bounds__(THREAD_PER_BLOCK, BLOCKS_PER_SM)
norm_dist_forward_kernel(const float* __restrict__ input, const float* __restrict__ weight,
int B, int CO, int CI, int HW, float* __restrict__ output, float p) {
int b_hw = blockIdx.y * THREAD_PER_BLOCK + threadIdx.x;
int b = b_hw / HW;
int hw = b_hw % HW;
int g = blockIdx.x;
__shared__ float blockW[CO_div_G * CI_div_G];
for (int pos = threadIdx.x; pos < CI_div_G * CO_div_G; pos += THREAD_PER_BLOCK)
blockW[pos] = weight[g * CO_div_G * CI_div_G + pos];
__syncthreads();
if (b >= B) return;
const int LOOP_CO = CO_div_G > 8 ? 8 : CO_div_G;
float r_max_x_sub_w[LOOP_CO], ans[LOOP_CO];
#pragma unroll(1)
for (int step = 0; step < CO_div_G; step += LOOP_CO) {
#pragma unroll
for (int i = 0; i < LOOP_CO; i++)
r_max_x_sub_w[i] = 1e-10f;
#pragma unroll(1)
for (int j = 0; j < CI_div_G; j++) {
float x = input[(b * CI + g * CI_div_G + j) * HW + hw];
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float w = blockW[(step + i) * CI_div_G + j];
r_max_x_sub_w[i] = max(r_max_x_sub_w[i], abs(x - w));
}
}
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
r_max_x_sub_w[i] = 1.0f / r_max_x_sub_w[i];
ans[i] = 1e-10f;
}
#pragma unroll(1)
for (int j = CI_div_G - 1; j >= 0; j--) {
float x = input[(b * CI + g * CI_div_G + j) * HW + hw];
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float w = blockW[(step + i) * CI_div_G + j];
ans[i] += pow_fun(abs(x - w) * r_max_x_sub_w[i], p);
}
}
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float res = __powf(ans[i], 1.0f / p) / r_max_x_sub_w[i];
output[(b * CO + g * CO_div_G + step + i) * HW + hw] = res;
}
}
}
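// Math behind the two passes above: with M = max_j |x_j - w_ij| the kernel
// evaluates ||x - w_i||_p = M * (sum_j (|x_j - w_ij| / M)^p)^(1/p).
// Factoring out M keeps every __powf argument in [0, 1], which avoids
// overflow/underflow for large p; the 1e-10 floors guard against division
// by zero when x coincides with a weight row.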
template <int CI_div_G, int CO_div_G>
__global__ void __launch_bounds__(THREAD_PER_BLOCK, BLOCKS_PER_SM)
norm_dist_backward_input_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO, int CI, int HW, float* __restrict__ grad_input, float p) {
int b_hw = blockIdx.y * THREAD_PER_BLOCK + threadIdx.x;
int b = b_hw / HW;
int hw = b_hw % HW;
int g = blockIdx.x;
__shared__ float blockW[CO_div_G * CI_div_G];
for (int pos = threadIdx.x; pos < CI_div_G * CO_div_G; pos += THREAD_PER_BLOCK)
blockW[pos] = weight[g * CO_div_G * CI_div_G + pos];
__syncthreads();
if (b >= B) return;
const int LOOP_CO = CO_div_G > 8 ? 8 : CO_div_G;
float grad_out[LOOP_CO], r_out[LOOP_CO];
#pragma unroll(1)
for (int step = 0; step < CO_div_G; step += LOOP_CO) {
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
grad_out[i] = grad_output[(b * CO + g * CO_div_G + step + i) * HW + hw];
r_out[i] = 1.0 / output[(b * CO + g * CO_div_G + step + i) * HW + hw];
}
#pragma unroll(1)
for (int j = 0; j < CI_div_G; j++) {
float x = input[(b * CI + g * CI_div_G + j) * HW + hw];
float ans = 0.0f;
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float w = blockW[(step + i) * CI_div_G + j];
float t = x - w;
ans += grad_out[i] * pow_fun(abs(t) * r_out[i], p - 1) * (t > 0 ? 1 : -1);
}
if (step == 0) grad_input[(b * CI + g * CI_div_G + j) * HW + hw] = ans;
else grad_input[(b * CI + g * CI_div_G + j) * HW + hw] += ans;
}
}
}
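// Gradient w.r.t. the input: with t_j = x_j - w_j and y = ||x - w||_p the
// inner loop accumulates grad_out * sign(t_j) * (|t_j| / y)^(p-1), which is
// exactly dy/dx_j; contributions from successive CO chunks are summed into
// grad_input (note the `=` on the first chunk and `+=` afterwards).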
template <int CI_div_G, int CO_div_G>
__global__ void __launch_bounds__(THREAD_PER_BLOCK, BLOCKS_PER_SM)
norm_dist_backward_weight_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO, int CI, int HW, float* __restrict__ grad_weight, float p) {
const int LOOP_HW = 8;
const int LOOP_CO = CO_div_G > THREAD_PER_BLOCK / 64 ? THREAD_PER_BLOCK / 64 : CO_div_G;
int b_hw_start = blockIdx.y * 64 * LOOP_HW;
int g = blockIdx.x;
float grad_out[LOOP_HW], r_out[LOOP_HW];
__shared__ float blockW[LOOP_CO * CI_div_G];
__shared__ float ans[LOOP_CO * CI_div_G];
__shared__ float blockI[64 * LOOP_HW];
#pragma unroll(1)
for (int step = 0; step < CO_div_G; step += LOOP_CO) {
for (int pos = threadIdx.x; pos < CI_div_G * LOOP_CO; pos += THREAD_PER_BLOCK)
blockW[pos] = weight[(g * CO_div_G + step) * CI_div_G + pos];
__syncthreads();
int co = (threadIdx.x >> 6) + step;
#pragma unroll
for (int k = 0; k < LOOP_HW; k++) {
int b_hw = b_hw_start + (threadIdx.x & 63) + k * 64;
int b = b_hw / HW;
int hw = b_hw % HW;
if (b < B) {
grad_out[k] = grad_output[(b * CO + g * CO_div_G + co) * HW + hw];
r_out[k] = 1.0f / output[(b * CO + g * CO_div_G + co) * HW + hw];
}
else {
grad_out[k] = 0.0f;
r_out[k] = 1e-10f;
}
}
#pragma unroll(1)
for (int j = 0; j < CI_div_G; j++) {
float w = blockW[(threadIdx.x >> 6) * CI_div_G + j];
#pragma unroll
for (int kk = 0; kk < LOOP_HW * 64; kk += LOOP_CO * 64) {
int b = (b_hw_start + kk + threadIdx.x) / HW;
int hw = (b_hw_start + kk + threadIdx.x) % HW;
blockI[kk + threadIdx.x] = b < B ? input[(b * CI + g * CI_div_G + j) * HW + hw] : 0.0f;
}
__syncthreads();
float res = 0.0f;
#pragma unroll
for (int k = 0; k < LOOP_HW; k++) {
float x = blockI[k * 64 + (threadIdx.x & 63)];
float t = w - x;
res += grad_out[k] * pow_fun(abs(t) * r_out[k], p - 1) * (t > 0 ? 1 : -1);
}
res += __shfl_xor_sync(0xffffffff, res, 1);
res += __shfl_xor_sync(0xffffffff, res, 2);
res += __shfl_xor_sync(0xffffffff, res, 4);
res += __shfl_xor_sync(0xffffffff, res, 8);
res += __shfl_xor_sync(0xffffffff, res, 16);
if ((threadIdx.x & 63) == 0) ans[(threadIdx.x >> 6) * CI_div_G + j] = res;
__syncthreads();
if ((threadIdx.x & 63) == 32) ans[(threadIdx.x >> 6) * CI_div_G + j] += res;
}
__syncthreads();
for (int pos = threadIdx.x; pos < CI_div_G * LOOP_CO; pos += THREAD_PER_BLOCK)
atomicAdd(&grad_weight[(g * CO_div_G + step) * CI_div_G + pos], ans[pos]);
}
}
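// Reduction pattern: each 64-thread slice handles one output channel; its
// partial sums over the 512 (= 8 x 64) b_hw positions of the tile are reduced
// with __shfl_xor_sync inside each 32-lane warp, lane 0 stores and lane 32
// adds the second warp's result into the shared `ans` buffer, and the block
// total is finally accumulated into global grad_weight with atomicAdd because
// different blockIdx.y tiles update the same weights.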
#define CALL_FUNC(func, thread, dim, var1, var2, paras...) \
bool success = true; \
if (var1 == 1 && var2 == 1)hipLaunchKernelGGL(( func<1, 1>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 1 && var2 == 2)hipLaunchKernelGGL(( func<1, 2>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 2 && var2 == 1)hipLaunchKernelGGL(( func<2, 1>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 2 && var2 == 2)hipLaunchKernelGGL(( func<2, 2>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 2 && var2 == 4)hipLaunchKernelGGL(( func<2, 4>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 4 && var2 == 2)hipLaunchKernelGGL(( func<4, 2>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 4 && var2 == 4)hipLaunchKernelGGL(( func<4, 4>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 4 && var2 == 8)hipLaunchKernelGGL(( func<4, 8>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 8 && var2 == 4)hipLaunchKernelGGL(( func<8, 4>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 8 && var2 == 8)hipLaunchKernelGGL(( func<8, 8>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 8 && var2 == 16)hipLaunchKernelGGL(( func<8, 16>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 16 && var2 == 8)hipLaunchKernelGGL(( func<16, 8>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 16 && var2 == 16)hipLaunchKernelGGL(( func<16, 16>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 16 && var2 == 32)hipLaunchKernelGGL(( func<16, 32>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 32 && var2 == 16)hipLaunchKernelGGL(( func<32, 16>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 32 && var2 == 32)hipLaunchKernelGGL(( func<32, 32>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 32 && var2 == 64)hipLaunchKernelGGL(( func<32, 64>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 64 && var2 == 32)hipLaunchKernelGGL(( func<64, 32>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 64 && var2 == 64)hipLaunchKernelGGL(( func<64, 64>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 9 && var2 == 1)hipLaunchKernelGGL(( func<9, 1>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 9 && var2 == 2)hipLaunchKernelGGL(( func<9, 2>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 18 && var2 == 1)hipLaunchKernelGGL(( func<18, 1>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 18 && var2 == 2)hipLaunchKernelGGL(( func<18, 2>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 18 && var2 == 4)hipLaunchKernelGGL(( func<18, 4>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 36 && var2 == 2)hipLaunchKernelGGL(( func<36, 2>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 36 && var2 == 4)hipLaunchKernelGGL(( func<36, 4>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 36 && var2 == 8)hipLaunchKernelGGL(( func<36, 8>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 72 && var2 == 4)hipLaunchKernelGGL(( func<72, 4>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 72 && var2 == 8)hipLaunchKernelGGL(( func<72, 8>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 72 && var2 == 16)hipLaunchKernelGGL(( func<72, 16>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 144 && var2 == 8)hipLaunchKernelGGL(( func<144, 8>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 144 && var2 == 16)hipLaunchKernelGGL(( func<144, 16>), dim3(dim), dim3(thread), 0, 0, paras); \
else if (var1 == 144 && var2 == 32)hipLaunchKernelGGL(( func<144, 32>), dim3(dim), dim3(thread), 0, 0, paras); \
else success = false; \
if (success) return;
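// CALL_FUNC maps the runtime channel counts (CI / G, CO / G) onto the explicit
// template specializations of the register-blocked kernels above. On a match the
// kernel is launched and the wrapper returns early; otherwise `success` stays
// false and control falls through to the generic tiled kernels defined below.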
const int BLOCK_SIZE = 16;
const int BATCH_BLOCK_SIZE = 8;
__device__ __forceinline__ float update_forward(float x, float w, float p, float r_max_x_sub_w) {
float t = abs(x - w);
return pow_fun(t * r_max_x_sub_w, p);
}
__device__ __forceinline__ void normalize(float& output_reg, float old_max, float r_new_max, float p) {
output_reg = output_reg * pow_fun(old_max * r_new_max, p);
}
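// update_forward / normalize implement a numerically stable p-norm: the running
// maximum of |x - w| is factored out so that only ratios <= 1 are raised to the
// power p, and already accumulated terms are rescaled whenever a larger maximum
// shows up. The final distance is max * (sum of ratios^p)^(1/p).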
#define getX (conv ? blockI[i][threadIdx.x] : blockI[threadIdx.y][i])
#define getW (conv ? blockW[i][threadIdx.y] : blockW[i][threadIdx.x])
template <bool conv>
__global__ void norm_dist_forward_kernel(const float* __restrict__ input, const float* __restrict__ weight,
int B, int CO_div_G, int CI_div_G, int HW, int G,
float* __restrict__ output, float p) {
float output_reg = 1e-10;
float max_x_sub_w = 1e-10, r_max_x_sub_w = 1.0 / max_x_sub_w;
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
__shared__ float blockI[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
__shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO
int k;
for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
if (b < B) {
if (conv) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
else blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
if (read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
float max_x_sub_w_batch = 0;
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++)
max_x_sub_w_batch = max(max_x_sub_w_batch, abs(getX - getW));
if (max_x_sub_w_batch > max_x_sub_w) {
r_max_x_sub_w = __frcp_rn(max_x_sub_w_batch);
normalize(output_reg, max_x_sub_w, r_max_x_sub_w, p);
max_x_sub_w = max_x_sub_w_batch;
}
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++)
output_reg += update_forward(getX, getW, p, r_max_x_sub_w);
__syncthreads();
}
if (CI_div_G & (BLOCK_SIZE - 1)) {
if (b < B) {
if (conv && k + threadIdx.y < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
if (!conv && k + threadIdx.x < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
float max_x_sub_w_batch = 0;
for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++)
max_x_sub_w_batch = max(max_x_sub_w_batch, abs(getX - getW));
if (max_x_sub_w_batch > max_x_sub_w) {
r_max_x_sub_w = __frcp_rn(max_x_sub_w_batch);
normalize(output_reg, max_x_sub_w, r_max_x_sub_w, p);
max_x_sub_w = max_x_sub_w_batch;
}
#pragma unroll
for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++)
output_reg += update_forward(getX, getW, p, r_max_x_sub_w);
__syncthreads();
}
if (b < B && write_co < CO_div_G) {
output_reg = __powf(output_reg, 1.0 / p) * max_x_sub_w;
output[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_reg;
}
}
__device__ __forceinline__ float update_backward_input(float x, float w, float r_o, float g, float p) {
float t = x - w;
return g * pow_fun(abs(t) * r_o, p - 1) * (t > 0 ? 1 : -1);
}
template <bool conv>
__global__ void norm_dist_backward_input_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_input, float p) {
float output_reg = 0;
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int write_ci = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_ci = blockIdx.x * BLOCK_SIZE + threadIdx.x;
__shared__ float blockO[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
__shared__ float blockG[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
__shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO
float x = 0;
if (b < B && write_ci < CI_div_G) x = input[((b * G + blockIdx.z) * CI_div_G + write_ci) * HW + hw];
int k;
for (k = 0; k < (CO_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
if (b < B) {
if (conv) {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw];
}
else {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw];
}
}
if (read_ci < CI_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + k + threadIdx.y) * CI_div_G + read_ci];
__syncthreads();
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++) {
if (conv) output_reg += update_backward_input(x, blockW[threadIdx.y][i], blockO[i][threadIdx.x], blockG[i][threadIdx.x], p);
else output_reg += update_backward_input(x, blockW[threadIdx.x][i], blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
}
__syncthreads();
}
if (CO_div_G & (BLOCK_SIZE - 1)) {
if (b < B) {
if (conv && k + threadIdx.y < CO_div_G){
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw];
}
if (!conv && k + threadIdx.x < CO_div_G){
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw];
}
}
if (k + threadIdx.y < CO_div_G && read_ci < CI_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + k + threadIdx.y) * CI_div_G + read_ci];
__syncthreads();
for (int i = 0; i < (CO_div_G & (BLOCK_SIZE - 1)); i++) {
if (conv) output_reg += update_backward_input(x, blockW[threadIdx.y][i], blockO[i][threadIdx.x], blockG[i][threadIdx.x], p);
else output_reg += update_backward_input(x, blockW[threadIdx.x][i], blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
}
__syncthreads();
}
if (b < B && write_ci < CI_div_G)
grad_input[((b * G + blockIdx.z) * CI_div_G + write_ci) * HW + hw] = output_reg;
}
__device__ __forceinline__ float update_backward_weight(float x, float w, float r_o, float g, float p) {
float t = w - x;
return g * pow_fun(abs(t) * r_o, p - 1) * (t > 0 ? 1 : -1);
}
template <bool conv>
__global__ void norm_dist_backward_weight_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_weight, float p) {
float output_reg = 0;
int write_co = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int write_ci = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int read_co = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_ci = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int B_start = B * (blockIdx.z % BATCH_BLOCK_SIZE) / BATCH_BLOCK_SIZE;
int B_end = B * (blockIdx.z % BATCH_BLOCK_SIZE + 1) / BATCH_BLOCK_SIZE;
int g = blockIdx.z / BATCH_BLOCK_SIZE;
int B_num = B_end - B_start;
__shared__ float blockI[BLOCK_SIZE][BLOCK_SIZE + 2]; // B * CI if conv else CI * B
__shared__ float blockO[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
__shared__ float blockG[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
float w = 0;
if (write_co < CO_div_G && write_ci < CI_div_G) w = weight[(g * CO_div_G + write_co) * CI_div_G + write_ci];
int k;
for (k = 0; k < ((B_num * HW) & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
int b = B_start + (conv ? (k + threadIdx.x) / HW : k + threadIdx.y);
int hw = conv ? (k + threadIdx.x) % HW : 0;
if (read_ci < CI_div_G) blockI[threadIdx.x][threadIdx.y] = input[((b * G + g) * CI_div_G + read_ci) * HW + hw];
if (read_co < CO_div_G) {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + g) * CO_div_G + read_co) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + g) * CO_div_G + read_co) * HW + hw];
}
__syncthreads();
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++) {
if (conv) output_reg += update_backward_weight(blockI[i][threadIdx.x], w, blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
else output_reg += update_backward_weight(blockI[threadIdx.x][i], w, blockO[i][threadIdx.y], blockG[i][threadIdx.y], p);
}
__syncthreads();
}
if ((B_num * HW) & (BLOCK_SIZE - 1)) {
int b = B_start + (conv ? (k + threadIdx.x) / HW : k + threadIdx.y);
int hw = conv ? (k + threadIdx.x) % HW : 0;
if (b < B_end) {
if (read_ci < CI_div_G) blockI[threadIdx.x][threadIdx.y] = input[((b * G + g) * CI_div_G + read_ci) * HW + hw];
if (read_co < CO_div_G) {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + g) * CO_div_G + read_co) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + g) * CO_div_G + read_co) * HW + hw];
}
}
__syncthreads();
for (int i = 0; i < ((B_num * HW) & (BLOCK_SIZE - 1)); i++) {
if (conv) output_reg += update_backward_weight(blockI[i][threadIdx.x], w, blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
else output_reg += update_backward_weight(blockI[threadIdx.x][i], w, blockO[i][threadIdx.y], blockG[i][threadIdx.y], p);
}
__syncthreads();
}
if (write_co < CO_div_G && write_ci < CI_div_G)
atomicAdd(&grad_weight[(g * CO_div_G + write_co) * CI_div_G + write_ci], output_reg);
}
void norm_dist_forward_cuda(const float* input, const float* weight,
int B, int CO, int CI, int G, int HW, float* output, float p) {
dim3 dimGrid(G, (B * HW - 1) / THREAD_PER_BLOCK + 1);
CALL_FUNC(norm_dist_forward_kernel, THREAD_PER_BLOCK, dimGrid, CI / G, CO / G, input, weight, B, CO, CI, HW, output, p);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) hipLaunchKernelGGL(( norm_dist_forward_kernel<false>), dim3(dimGrid2), dim3(dimBlock), 0, 0, input, weight, B, CO / G, CI / G, HW, G, output, p);
else hipLaunchKernelGGL(( norm_dist_forward_kernel<true>), dim3(dimGrid2), dim3(dimBlock), 0, 0, input, weight, B, CO / G, CI / G, HW, G, output, p);
}
void norm_dist_backward_input_cuda(const float* grad_output, const float* input, const float* weight, const float* output,
int B, int CO, int CI, int G, int HW, float* grad_input, float p) {
dim3 dimGrid(G, (B * HW - 1) / THREAD_PER_BLOCK + 1);
CALL_FUNC(norm_dist_backward_input_kernel, THREAD_PER_BLOCK, dimGrid, CI / G, CO / G, grad_output, input, weight, output, B, CO, CI, HW, grad_input, p);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CI / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) hipLaunchKernelGGL(( norm_dist_backward_input_kernel<false>), dim3(dimGrid2), dim3(dimBlock), 0, 0, grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_input, p);
else hipLaunchKernelGGL(( norm_dist_backward_input_kernel<true>), dim3(dimGrid2), dim3(dimBlock), 0, 0, grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_input, p);
}
void norm_dist_backward_weight_cuda(const float* grad_output, const float* input, const float* weight, const float* output,
int B, int CO, int CI, int G, int HW, float* grad_weight, float p) {
dim3 dimGrid(G, (B * HW - 1) / (64 * 8) + 1);
CALL_FUNC(norm_dist_backward_weight_kernel, min(64 * (CO / G), THREAD_PER_BLOCK), dimGrid, CI / G, CO / G, grad_output, input, weight, output, B, CO, CI, HW, grad_weight, p);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CI / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, BATCH_BLOCK_SIZE * G);
if (HW == 1) hipLaunchKernelGGL(( norm_dist_backward_weight_kernel<false>), dim3(dimGrid2), dim3(dimBlock), 0, 0, grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_weight, p);
else hipLaunchKernelGGL(( norm_dist_backward_weight_kernel<true>), dim3(dimGrid2), dim3(dimBlock), 0, 0, grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_weight, p);
}
| 90582267b1c17b5ebafe4d8aa893813ba021912f.cu | #include <cassert>
#include <cstdio>
#define THREAD_PER_BLOCK 256
#define BLOCKS_PER_SM 4
//each thread can only have 64 registers
__device__ __forceinline__ float pow_fun(float x, float p) {
return __powf(x, p);
}
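// Forward pass (register-blocked version): each thread owns one (batch, pixel)
// position and computes the p-norm distance ||x - w||_p to LOOP_CO weight rows at
// a time, with the whole weight tile of the group cached in shared memory. Two
// passes over the CI_div_G inputs are made: the first finds max|x - w| per output
// channel, the second accumulates (|x - w| / max)^p, and the value written out is
// max * (sum)^(1/p), which keeps __powf well conditioned.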
template <int CI_div_G, int CO_div_G>
__global__ void __launch_bounds__(THREAD_PER_BLOCK, BLOCKS_PER_SM)
norm_dist_forward_kernel(const float* __restrict__ input, const float* __restrict__ weight,
int B, int CO, int CI, int HW, float* __restrict__ output, float p) {
int b_hw = blockIdx.y * THREAD_PER_BLOCK + threadIdx.x;
int b = b_hw / HW;
int hw = b_hw % HW;
int g = blockIdx.x;
__shared__ float blockW[CO_div_G * CI_div_G];
for (int pos = threadIdx.x; pos < CI_div_G * CO_div_G; pos += THREAD_PER_BLOCK)
blockW[pos] = weight[g * CO_div_G * CI_div_G + pos];
__syncthreads();
if (b >= B) return;
const int LOOP_CO = CO_div_G > 8 ? 8 : CO_div_G;
float r_max_x_sub_w[LOOP_CO], ans[LOOP_CO];
#pragma unroll(1)
for (int step = 0; step < CO_div_G; step += LOOP_CO) {
#pragma unroll
for (int i = 0; i < LOOP_CO; i++)
r_max_x_sub_w[i] = 1e-10f;
#pragma unroll(1)
for (int j = 0; j < CI_div_G; j++) {
float x = input[(b * CI + g * CI_div_G + j) * HW + hw];
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float w = blockW[(step + i) * CI_div_G + j];
r_max_x_sub_w[i] = max(r_max_x_sub_w[i], abs(x - w));
}
}
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
r_max_x_sub_w[i] = 1.0f / r_max_x_sub_w[i];
ans[i] = 1e-10f;
}
#pragma unroll(1)
for (int j = CI_div_G - 1; j >= 0; j--) {
float x = input[(b * CI + g * CI_div_G + j) * HW + hw];
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float w = blockW[(step + i) * CI_div_G + j];
ans[i] += pow_fun(abs(x - w) * r_max_x_sub_w[i], p);
}
}
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float res = __powf(ans[i], 1.0f / p) / r_max_x_sub_w[i];
output[(b * CO + g * CO_div_G + step + i) * HW + hw] = res;
}
}
}
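// Backward pass w.r.t. the input. For f = (sum_j |x_j - w_j|^p)^(1/p) the
// derivative is df/dx_j = sign(x_j - w_j) * (|x_j - w_j| / f)^(p - 1); each term
// grad_out * pow(|t| * r_out, p - 1) * sign(t) below evaluates exactly that, with
// r_out = 1 / f taken from the saved forward output.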
template <int CI_div_G, int CO_div_G>
__global__ void __launch_bounds__(THREAD_PER_BLOCK, BLOCKS_PER_SM)
norm_dist_backward_input_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO, int CI, int HW, float* __restrict__ grad_input, float p) {
int b_hw = blockIdx.y * THREAD_PER_BLOCK + threadIdx.x;
int b = b_hw / HW;
int hw = b_hw % HW;
int g = blockIdx.x;
__shared__ float blockW[CO_div_G * CI_div_G];
for (int pos = threadIdx.x; pos < CI_div_G * CO_div_G; pos += THREAD_PER_BLOCK)
blockW[pos] = weight[g * CO_div_G * CI_div_G + pos];
__syncthreads();
if (b >= B) return;
const int LOOP_CO = CO_div_G > 8 ? 8 : CO_div_G;
float grad_out[LOOP_CO], r_out[LOOP_CO];
#pragma unroll(1)
for (int step = 0; step < CO_div_G; step += LOOP_CO) {
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
grad_out[i] = grad_output[(b * CO + g * CO_div_G + step + i) * HW + hw];
r_out[i] = 1.0 / output[(b * CO + g * CO_div_G + step + i) * HW + hw];
}
#pragma unroll(1)
for (int j = 0; j < CI_div_G; j++) {
float x = input[(b * CI + g * CI_div_G + j) * HW + hw];
float ans = 0.0f;
#pragma unroll
for (int i = 0; i < LOOP_CO; i++) {
float w = blockW[(step + i) * CI_div_G + j];
float t = x - w;
ans += grad_out[i] * pow_fun(abs(t) * r_out[i], p - 1) * (t > 0 ? 1 : -1);
}
if (step == 0) grad_input[(b * CI + g * CI_div_G + j) * HW + hw] = ans;
else grad_input[(b * CI + g * CI_div_G + j) * HW + hw] += ans;
}
}
}
template <int CI_div_G, int CO_div_G>
__global__ void __launch_bounds__(THREAD_PER_BLOCK, BLOCKS_PER_SM)
norm_dist_backward_weight_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO, int CI, int HW, float* __restrict__ grad_weight, float p) {
const int LOOP_HW = 8;
const int LOOP_CO = CO_div_G > THREAD_PER_BLOCK / 64 ? THREAD_PER_BLOCK / 64 : CO_div_G;
int b_hw_start = blockIdx.y * 64 * LOOP_HW;
int g = blockIdx.x;
float grad_out[LOOP_HW], r_out[LOOP_HW];
__shared__ float blockW[LOOP_CO * CI_div_G];
__shared__ float ans[LOOP_CO * CI_div_G];
__shared__ float blockI[64 * LOOP_HW];
#pragma unroll(1)
for (int step = 0; step < CO_div_G; step += LOOP_CO) {
for (int pos = threadIdx.x; pos < CI_div_G * LOOP_CO; pos += THREAD_PER_BLOCK)
blockW[pos] = weight[(g * CO_div_G + step) * CI_div_G + pos];
__syncthreads();
int co = (threadIdx.x >> 6) + step;
#pragma unroll
for (int k = 0; k < LOOP_HW; k++) {
int b_hw = b_hw_start + (threadIdx.x & 63) + k * 64;
int b = b_hw / HW;
int hw = b_hw % HW;
if (b < B) {
grad_out[k] = grad_output[(b * CO + g * CO_div_G + co) * HW + hw];
r_out[k] = 1.0f / output[(b * CO + g * CO_div_G + co) * HW + hw];
}
else {
grad_out[k] = 0.0f;
r_out[k] = 1e-10f;
}
}
#pragma unroll(1)
for (int j = 0; j < CI_div_G; j++) {
float w = blockW[(threadIdx.x >> 6) * CI_div_G + j];
#pragma unroll
for (int kk = 0; kk < LOOP_HW * 64; kk += LOOP_CO * 64) {
int b = (b_hw_start + kk + threadIdx.x) / HW;
int hw = (b_hw_start + kk + threadIdx.x) % HW;
blockI[kk + threadIdx.x] = b < B ? input[(b * CI + g * CI_div_G + j) * HW + hw] : 0.0f;
}
__syncthreads();
float res = 0.0f;
#pragma unroll
for (int k = 0; k < LOOP_HW; k++) {
float x = blockI[k * 64 + (threadIdx.x & 63)];
float t = w - x;
res += grad_out[k] * pow_fun(abs(t) * r_out[k], p - 1) * (t > 0 ? 1 : -1);
}
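// Butterfly reduction over the 64 threads sharing this output channel: the XOR
// shuffles sum within each 32-lane warp, lane 0 of the first warp writes its warp
// total to shared memory, and lane 0 of the second warp adds its total after the
// barrier.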
res += __shfl_xor_sync(0xffffffff, res, 1);
res += __shfl_xor_sync(0xffffffff, res, 2);
res += __shfl_xor_sync(0xffffffff, res, 4);
res += __shfl_xor_sync(0xffffffff, res, 8);
res += __shfl_xor_sync(0xffffffff, res, 16);
if ((threadIdx.x & 63) == 0) ans[(threadIdx.x >> 6) * CI_div_G + j] = res;
__syncthreads();
if ((threadIdx.x & 63) == 32) ans[(threadIdx.x >> 6) * CI_div_G + j] += res;
}
__syncthreads();
for (int pos = threadIdx.x; pos < CI_div_G * LOOP_CO; pos += THREAD_PER_BLOCK)
atomicAdd(&grad_weight[(g * CO_div_G + step) * CI_div_G + pos], ans[pos]);
}
}
#define CALL_FUNC(func, thread, dim, var1, var2, paras...) \
bool success = true; \
if (var1 == 1 && var2 == 1) func<1, 1><<<dim, thread>>>(paras); \
else if (var1 == 1 && var2 == 2) func<1, 2><<<dim, thread>>>(paras); \
else if (var1 == 2 && var2 == 1) func<2, 1><<<dim, thread>>>(paras); \
else if (var1 == 2 && var2 == 2) func<2, 2><<<dim, thread>>>(paras); \
else if (var1 == 2 && var2 == 4) func<2, 4><<<dim, thread>>>(paras); \
else if (var1 == 4 && var2 == 2) func<4, 2><<<dim, thread>>>(paras); \
else if (var1 == 4 && var2 == 4) func<4, 4><<<dim, thread>>>(paras); \
else if (var1 == 4 && var2 == 8) func<4, 8><<<dim, thread>>>(paras); \
else if (var1 == 8 && var2 == 4) func<8, 4><<<dim, thread>>>(paras); \
else if (var1 == 8 && var2 == 8) func<8, 8><<<dim, thread>>>(paras); \
else if (var1 == 8 && var2 == 16) func<8, 16><<<dim, thread>>>(paras); \
else if (var1 == 16 && var2 == 8) func<16, 8><<<dim, thread>>>(paras); \
else if (var1 == 16 && var2 == 16) func<16, 16><<<dim, thread>>>(paras); \
else if (var1 == 16 && var2 == 32) func<16, 32><<<dim, thread>>>(paras); \
else if (var1 == 32 && var2 == 16) func<32, 16><<<dim, thread>>>(paras); \
else if (var1 == 32 && var2 == 32) func<32, 32><<<dim, thread>>>(paras); \
else if (var1 == 32 && var2 == 64) func<32, 64><<<dim, thread>>>(paras); \
else if (var1 == 64 && var2 == 32) func<64, 32><<<dim, thread>>>(paras); \
else if (var1 == 64 && var2 == 64) func<64, 64><<<dim, thread>>>(paras); \
else if (var1 == 9 && var2 == 1) func<9, 1><<<dim, thread>>>(paras); \
else if (var1 == 9 && var2 == 2) func<9, 2><<<dim, thread>>>(paras); \
else if (var1 == 18 && var2 == 1) func<18, 1><<<dim, thread>>>(paras); \
else if (var1 == 18 && var2 == 2) func<18, 2><<<dim, thread>>>(paras); \
else if (var1 == 18 && var2 == 4) func<18, 4><<<dim, thread>>>(paras); \
else if (var1 == 36 && var2 == 2) func<36, 2><<<dim, thread>>>(paras); \
else if (var1 == 36 && var2 == 4) func<36, 4><<<dim, thread>>>(paras); \
else if (var1 == 36 && var2 == 8) func<36, 8><<<dim, thread>>>(paras); \
else if (var1 == 72 && var2 == 4) func<72, 4><<<dim, thread>>>(paras); \
else if (var1 == 72 && var2 == 8) func<72, 8><<<dim, thread>>>(paras); \
else if (var1 == 72 && var2 == 16) func<72, 16><<<dim, thread>>>(paras); \
else if (var1 == 144 && var2 == 8) func<144, 8><<<dim, thread>>>(paras); \
else if (var1 == 144 && var2 == 16) func<144, 16><<<dim, thread>>>(paras); \
else if (var1 == 144 && var2 == 32) func<144, 32><<<dim, thread>>>(paras); \
else success = false; \
if (success) return;
const int BLOCK_SIZE = 16;
const int BATCH_BLOCK_SIZE = 8;
__device__ __forceinline__ float update_forward(float x, float w, float p, float r_max_x_sub_w) {
float t = abs(x - w);
return pow_fun(t * r_max_x_sub_w, p);
}
__device__ __forceinline__ void normalize(float& output_reg, float old_max, float r_new_max, float p) {
output_reg = output_reg * pow_fun(old_max * r_new_max, p);
}
#define getX (conv ? blockI[i][threadIdx.x] : blockI[threadIdx.y][i])
#define getW (conv ? blockW[i][threadIdx.y] : blockW[i][threadIdx.x])
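// The tiled kernels below stage BLOCK_SIZE x BLOCK_SIZE tiles in shared memory.
// The `conv` template flag only decides which thread index walks the batch*pixel
// axis: for conv (HW > 1) it is threadIdx.x while threadIdx.y walks the output
// channel; for the fully connected case (HW == 1) the roles are swapped. The
// getX / getW macros hide that layout difference.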
template <bool conv>
__global__ void norm_dist_forward_kernel(const float* __restrict__ input, const float* __restrict__ weight,
int B, int CO_div_G, int CI_div_G, int HW, int G,
float* __restrict__ output, float p) {
float output_reg = 1e-10;
float max_x_sub_w = 1e-10, r_max_x_sub_w = 1.0 / max_x_sub_w;
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
__shared__ float blockI[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
__shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO
int k;
for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
if (b < B) {
if (conv) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
else blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
if (read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
float max_x_sub_w_batch = 0;
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++)
max_x_sub_w_batch = max(max_x_sub_w_batch, abs(getX - getW));
if (max_x_sub_w_batch > max_x_sub_w) {
r_max_x_sub_w = __frcp_rn(max_x_sub_w_batch);
normalize(output_reg, max_x_sub_w, r_max_x_sub_w, p);
max_x_sub_w = max_x_sub_w_batch;
}
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++)
output_reg += update_forward(getX, getW, p, r_max_x_sub_w);
__syncthreads();
}
if (CI_div_G & (BLOCK_SIZE - 1)) {
if (b < B) {
if (conv && k + threadIdx.y < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
if (!conv && k + threadIdx.x < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
float max_x_sub_w_batch = 0;
for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++)
max_x_sub_w_batch = max(max_x_sub_w_batch, abs(getX - getW));
if (max_x_sub_w_batch > max_x_sub_w) {
r_max_x_sub_w = __frcp_rn(max_x_sub_w_batch);
normalize(output_reg, max_x_sub_w, r_max_x_sub_w, p);
max_x_sub_w = max_x_sub_w_batch;
}
#pragma unroll
for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++)
output_reg += update_forward(getX, getW, p, r_max_x_sub_w);
__syncthreads();
}
if (b < B && write_co < CO_div_G) {
output_reg = __powf(output_reg, 1.0 / p) * max_x_sub_w;
output[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_reg;
}
}
__device__ __forceinline__ float update_backward_input(float x, float w, float r_o, float g, float p) {
float t = x - w;
return g * pow_fun(abs(t) * r_o, p - 1) * (t > 0 ? 1 : -1);
}
template <bool conv>
__global__ void norm_dist_backward_input_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_input, float p) {
float output_reg = 0;
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int write_ci = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_ci = blockIdx.x * BLOCK_SIZE + threadIdx.x;
__shared__ float blockO[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
__shared__ float blockG[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
__shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO
float x = 0;
if (b < B && write_ci < CI_div_G) x = input[((b * G + blockIdx.z) * CI_div_G + write_ci) * HW + hw];
int k;
for (k = 0; k < (CO_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
if (b < B) {
if (conv) {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw];
}
else {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw];
}
}
if (read_ci < CI_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + k + threadIdx.y) * CI_div_G + read_ci];
__syncthreads();
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++) {
if (conv) output_reg += update_backward_input(x, blockW[threadIdx.y][i], blockO[i][threadIdx.x], blockG[i][threadIdx.x], p);
else output_reg += update_backward_input(x, blockW[threadIdx.x][i], blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
}
__syncthreads();
}
if (CO_div_G & (BLOCK_SIZE - 1)) {
if (b < B) {
if (conv && k + threadIdx.y < CO_div_G){
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.y) * HW + hw];
}
if (!conv && k + threadIdx.x < CO_div_G){
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + blockIdx.z) * CO_div_G + k + threadIdx.x) * HW + hw];
}
}
if (k + threadIdx.y < CO_div_G && read_ci < CI_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + k + threadIdx.y) * CI_div_G + read_ci];
__syncthreads();
for (int i = 0; i < (CO_div_G & (BLOCK_SIZE - 1)); i++) {
if (conv) output_reg += update_backward_input(x, blockW[threadIdx.y][i], blockO[i][threadIdx.x], blockG[i][threadIdx.x], p);
else output_reg += update_backward_input(x, blockW[threadIdx.x][i], blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
}
__syncthreads();
}
if (b < B && write_ci < CI_div_G)
grad_input[((b * G + blockIdx.z) * CI_div_G + write_ci) * HW + hw] = output_reg;
}
__device__ __forceinline__ float update_backward_weight(float x, float w, float r_o, float g, float p) {
float t = w - x;
return g * pow_fun(abs(t) * r_o, p - 1) * (t > 0 ? 1 : -1);
}
template <bool conv>
__global__ void norm_dist_backward_weight_kernel(const float* __restrict__ grad_output, const float* __restrict__ input,
const float* __restrict__ weight, const float* __restrict__ output,
int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_weight, float p) {
float output_reg = 0;
int write_co = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int write_ci = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int read_co = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_ci = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int B_start = B * (blockIdx.z % BATCH_BLOCK_SIZE) / BATCH_BLOCK_SIZE;
int B_end = B * (blockIdx.z % BATCH_BLOCK_SIZE + 1) / BATCH_BLOCK_SIZE;
int g = blockIdx.z / BATCH_BLOCK_SIZE;
int B_num = B_end - B_start;
__shared__ float blockI[BLOCK_SIZE][BLOCK_SIZE + 2]; // B * CI if conv else CI * B
__shared__ float blockO[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
__shared__ float blockG[BLOCK_SIZE][BLOCK_SIZE]; // CO * B if conv else B * CO
float w = 0;
if (write_co < CO_div_G && write_ci < CI_div_G) w = weight[(g * CO_div_G + write_co) * CI_div_G + write_ci];
int k;
for (k = 0; k < ((B_num * HW) & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
int b = B_start + (conv ? (k + threadIdx.x) / HW : k + threadIdx.y);
int hw = conv ? (k + threadIdx.x) % HW : 0;
if (read_ci < CI_div_G) blockI[threadIdx.x][threadIdx.y] = input[((b * G + g) * CI_div_G + read_ci) * HW + hw];
if (read_co < CO_div_G) {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + g) * CO_div_G + read_co) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + g) * CO_div_G + read_co) * HW + hw];
}
__syncthreads();
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++) {
if (conv) output_reg += update_backward_weight(blockI[i][threadIdx.x], w, blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
else output_reg += update_backward_weight(blockI[threadIdx.x][i], w, blockO[i][threadIdx.y], blockG[i][threadIdx.y], p);
}
__syncthreads();
}
if ((B_num * HW) & (BLOCK_SIZE - 1)) {
int b = B_start + (conv ? (k + threadIdx.x) / HW : k + threadIdx.y);
int hw = conv ? (k + threadIdx.x) % HW : 0;
if (b < B_end) {
if (read_ci < CI_div_G) blockI[threadIdx.x][threadIdx.y] = input[((b * G + g) * CI_div_G + read_ci) * HW + hw];
if (read_co < CO_div_G) {
blockO[threadIdx.y][threadIdx.x] = __frcp_rn(output[((b * G + g) * CO_div_G + read_co) * HW + hw]);
blockG[threadIdx.y][threadIdx.x] = grad_output[((b * G + g) * CO_div_G + read_co) * HW + hw];
}
}
__syncthreads();
for (int i = 0; i < ((B_num * HW) & (BLOCK_SIZE - 1)); i++) {
if (conv) output_reg += update_backward_weight(blockI[i][threadIdx.x], w, blockO[threadIdx.y][i], blockG[threadIdx.y][i], p);
else output_reg += update_backward_weight(blockI[threadIdx.x][i], w, blockO[i][threadIdx.y], blockG[i][threadIdx.y], p);
}
__syncthreads();
}
if (write_co < CO_div_G && write_ci < CI_div_G)
atomicAdd(&grad_weight[(g * CO_div_G + write_co) * CI_div_G + write_ci], output_reg);
}
void norm_dist_forward_cuda(const float* input, const float* weight,
int B, int CO, int CI, int G, int HW, float* output, float p) {
dim3 dimGrid(G, (B * HW - 1) / THREAD_PER_BLOCK + 1);
CALL_FUNC(norm_dist_forward_kernel, THREAD_PER_BLOCK, dimGrid, CI / G, CO / G, input, weight, B, CO, CI, HW, output, p);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) norm_dist_forward_kernel<false><<<dimGrid2, dimBlock>>>(input, weight, B, CO / G, CI / G, HW, G, output, p);
else norm_dist_forward_kernel<true><<<dimGrid2, dimBlock>>>(input, weight, B, CO / G, CI / G, HW, G, output, p);
}
void norm_dist_backward_input_cuda(const float* grad_output, const float* input, const float* weight, const float* output,
int B, int CO, int CI, int G, int HW, float* grad_input, float p) {
dim3 dimGrid(G, (B * HW - 1) / THREAD_PER_BLOCK + 1);
CALL_FUNC(norm_dist_backward_input_kernel, THREAD_PER_BLOCK, dimGrid, CI / G, CO / G, grad_output, input, weight, output, B, CO, CI, HW, grad_input, p);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CI / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) norm_dist_backward_input_kernel<false><<<dimGrid2, dimBlock>>>(grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_input, p);
else norm_dist_backward_input_kernel<true><<<dimGrid2, dimBlock>>>(grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_input, p);
}
void norm_dist_backward_weight_cuda(const float* grad_output, const float* input, const float* weight, const float* output,
int B, int CO, int CI, int G, int HW, float* grad_weight, float p) {
dim3 dimGrid(G, (B * HW - 1) / (64 * 8) + 1);
CALL_FUNC(norm_dist_backward_weight_kernel, min(64 * (CO / G), THREAD_PER_BLOCK), dimGrid, CI / G, CO / G, grad_output, input, weight, output, B, CO, CI, HW, grad_weight, p);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CI / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, BATCH_BLOCK_SIZE * G);
if (HW == 1) norm_dist_backward_weight_kernel<false><<<dimGrid2, dimBlock>>>(grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_weight, p);
else norm_dist_backward_weight_kernel<true><<<dimGrid2, dimBlock>>>(grad_output, input, weight, output, B, CO / G, CI / G, HW, G, grad_weight, p);
}
|
ef6de077e1b59bdbf157112d148d038337b4f04c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019,20-21-22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include "../../utils.h"
#define PRIVATE_CASE_TYPE_AND_VAL(ENUM_TYPE, TYPE, TYPE_NAME, VAL, ...) \
case ENUM_TYPE: { \
using TYPE_NAME = TYPE; \
const int block_size = VAL; \
return __VA_ARGS__(); \
}
#define DISPATCH_INPUT_TYPES(TYPE, TYPE_NAME, SCOPE_NAME, ...) \
[&] { \
switch(TYPE) \
{ \
PRIVATE_CASE_TYPE_AND_VAL(at::ScalarType::Float, float, TYPE_NAME, 1024, __VA_ARGS__) \
PRIVATE_CASE_TYPE_AND_VAL(at::ScalarType::Double, double, TYPE_NAME, 512, __VA_ARGS__) \
default: \
AT_ERROR(#SCOPE_NAME, " not implemented for '", toString(TYPE), "'"); \
} \
}()
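// DISPATCH_INPUT_TYPES picks both the scalar type and the launch block size
// (1024 threads for float, 512 for double), likely so that the shared
// shm_pointsbbox tile stays at 16 KB per block for either type.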
namespace kaolin {
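// Packed rasterization: one thread per output pixel. The faces of the pixel's mesh
// are processed in BLOCK_SIZE chunks whose bounding boxes are staged in shared
// memory; for every face that covers the pixel the kernel keeps the largest
// interpolated depth (the face closest to the camera) together with its
// barycentric weights and the interpolated per-vertex features.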
template<typename scalar_t, int BLOCK_SIZE>
__global__ void packed_rasterize_forward_cuda_kernel(
const scalar_t* __restrict__ face_vertices_z,
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_bboxes,
const scalar_t* __restrict__ face_features,
const int64_t* __restrict__ first_idx_face_per_mesh,
int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ output_weights,
scalar_t* __restrict__ interpolated_features,
const int batch_size,
const int height,
const int width,
const int num_faces,
const int num_features,
const float multiplier,
const float eps) {
__shared__ scalar_t shm_pointsbbox[BLOCK_SIZE][4];
for (int bidx = blockIdx.y; bidx < batch_size; bidx += gridDim.y) {
for (int start_pixel_idx = blockIdx.x * blockDim.x;
start_pixel_idx < width * height;
start_pixel_idx += gridDim.x * blockDim.x) {
const int pixel_idx = start_pixel_idx + threadIdx.x;
const int wididx = pixel_idx % width;
const int heiidx = (pixel_idx - wididx) / width;
const int first_id_faces = first_idx_face_per_mesh[bidx];
const int last_id_faces = first_idx_face_per_mesh[bidx + 1];
scalar_t max_z0 = -INFINITY;
int max_face_idx = -1;
scalar_t max_w0 = 0.;
scalar_t max_w1 = 0.;
scalar_t max_w2 = 0.;
bool is_active_pixel = heiidx < height;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + pixel_idx;
const int totalidx3 = totalidx1 * 3;
const int totalidxd = totalidx1 * num_features;
// pixel coordinate
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
for (int start_face_idx = first_id_faces;
start_face_idx < last_id_faces;
start_face_idx += BLOCK_SIZE) {
const int remaining_faces = last_id_faces - start_face_idx;
const int num_faces_this_iter = remaining_faces > BLOCK_SIZE ? BLOCK_SIZE : remaining_faces;
__syncthreads();
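// Cooperatively stage up to BLOCK_SIZE face bounding boxes (xmin, ymin, xmax, ymax)
// into shared memory: each thread copies one value per iteration and the index
// arithmetic below regroups the flat reads as shm_pointsbbox[face][coordinate].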
#pragma unroll
for (int ii = 0; ii < 4; ii++) {
const int _start_idx = start_face_idx * 4 + threadIdx.x + ii * blockDim.x;
if (_start_idx < (last_id_faces * 4)) {
shm_pointsbbox[((threadIdx.x - (threadIdx.x % 4) + ii * blockDim.x) / 4)][threadIdx.x % 4] = \
face_bboxes[_start_idx];
}
}
__syncthreads();
if (!(is_active_pixel)) {
continue;
}
for (int ii = 0; ii < num_faces_this_iter; ii++) {
int face_idx = ii + start_face_idx;
// This is a bounding box of the face
const scalar_t xmin = shm_pointsbbox[ii][0];
const scalar_t ymin = shm_pointsbbox[ii][1];
const scalar_t xmax = shm_pointsbbox[ii][2];
const scalar_t ymax = shm_pointsbbox[ii][3];
// The pixel doesn't lie in the bounding box
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
const int shift1 = face_idx;
const int shift3 = shift1 * 3;
const int shift6 = shift1 * 6;
// if this pixel is covered by this face, then we check its depth and weights
const scalar_t ax = face_vertices_image[shift6 + 0];
const scalar_t ay = face_vertices_image[shift6 + 1];
const scalar_t bx = face_vertices_image[shift6 + 2];
const scalar_t by = face_vertices_image[shift6 + 3];
const scalar_t cx = face_vertices_image[shift6 + 4];
const scalar_t cy = face_vertices_image[shift6 + 5];
const scalar_t a_edge_x = ax - x0;
const scalar_t a_edge_y = ay - y0;
const scalar_t b_edge_x = bx - x0;
const scalar_t b_edge_y = by - y0;
const scalar_t c_edge_x = cx - x0;
const scalar_t c_edge_y = cy - y0;
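// w0, w1, w2 are the triangle's edge functions at the pixel, i.e. twice the signed
// areas of the sub-triangles (p, b, c), (p, c, a) and (p, a, b). Dividing by their
// sum turns them into barycentric coordinates; any negative weight means the pixel
// lies outside the face.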
scalar_t w0 = b_edge_x * c_edge_y - b_edge_y * c_edge_x;
scalar_t w1 = c_edge_x * a_edge_y - c_edge_y * a_edge_x;
scalar_t w2 = a_edge_x * b_edge_y - a_edge_y * b_edge_x;
scalar_t norm = w0 + w1 + w2;
norm += copysign(static_cast<double>(eps),
static_cast<double>(norm));
w0 /= norm;
w1 /= norm;
w2 /= norm;
// The pixel doesn't lie in the triangle
if (w0 < 0. || w1 < 0. || w2 < 0.) {
continue;
}
// if it is perspective, then this way has a little error
// because face plane may not be parallel to the image plane
// but let's ignore it first
const scalar_t az = face_vertices_z[shift3 + 0];
const scalar_t bz = face_vertices_z[shift3 + 1];
const scalar_t cz = face_vertices_z[shift3 + 2];
const scalar_t z0 = w0 * az + w1 * bz + w2 * cz;
// The intersection is not the closest from the camera
if (z0 <= max_z0) {
continue;
}
max_z0 = z0;
max_face_idx = face_idx;
max_w0 = w0;
max_w1 = w1;
max_w2 = w2;
}
}
if (max_face_idx > -1) {
// index
selected_face_idx[totalidx1] = max_face_idx - first_id_faces;
const int shift3d = max_face_idx * 3 * num_features;
// weights
output_weights[totalidx3 + 0] = max_w0;
output_weights[totalidx3 + 1] = max_w1;
output_weights[totalidx3 + 2] = max_w2;
// color
for (int d = 0; d < num_features; d++) {
const scalar_t r0 = face_features[shift3d + d];
const scalar_t r1 = face_features[shift3d + num_features + d];
const scalar_t r2 = face_features[shift3d + num_features + num_features + d];
interpolated_features[totalidxd + d] = max_w0 * r0 + max_w1 * r1 + max_w2 * r2;
}
}
}
}
}
void packed_rasterize_forward_cuda_impl(
const at::Tensor face_vertices_z,
const at::Tensor face_vertices_image,
const at::Tensor face_bboxes,
const at::Tensor face_features,
const at::Tensor num_face_per_mesh,
at::Tensor selected_face_idx,
at::Tensor output_weights,
at::Tensor interpolated_features,
const float multiplier,
const float eps) {
const int num_faces = face_vertices_z.size(1);
const int batch_size = interpolated_features.size(0);
const int height = interpolated_features.size(1);
const int width = interpolated_features.size(2);
const int num_features = interpolated_features.size(3);
const int num_pixels = height * width;
DISPATCH_INPUT_TYPES(face_vertices_z.scalar_type(), scalar_t,
"packed_rasterize_forward_cuda", [&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(face_vertices_z));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int num_blocks_per_sample = num_pixels / block_size + 1;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(num_blocks_per_sample, 1, 1);
hipLaunchKernelGGL(( packed_rasterize_forward_cuda_kernel<scalar_t, block_size>), dim3(blocks), dim3(threads), 0, stream,
face_vertices_z.data_ptr<scalar_t>(),
face_vertices_image.data_ptr<scalar_t>(),
face_bboxes.data_ptr<scalar_t>(),
face_features.data_ptr<scalar_t>(),
num_face_per_mesh.data_ptr<int64_t>(),
selected_face_idx.data_ptr<int64_t>(),
output_weights.data_ptr<scalar_t>(),
interpolated_features.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, num_features,
multiplier, eps);
AT_CUDA_CHECK(hipGetLastError());
});
}
template<typename scalar_t>
__global__ void rasterize_backward_cuda_kernel(
const scalar_t* __restrict__ grad_interpolated_features,
const int64_t* __restrict__ selected_face_idx,
const scalar_t* __restrict__ output_weights,
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_features,
scalar_t* __restrict__ grad_face_vertices_image,
scalar_t* __restrict__ grad_face_features,
const int batch_size,
const int height,
const int width,
const int num_faces,
const int feat_dim,
const float eps) {
const int num_pixels = height * width;
// Each iteration is treating a single feature of a single pixel
for (int true_pixel_idx = blockIdx.x * blockDim.x + threadIdx.x;
true_pixel_idx < batch_size * num_pixels;
true_pixel_idx += blockDim.x * gridDim.x) {
const int pixel_idx = true_pixel_idx % num_pixels;
const int batch_idx = (true_pixel_idx - pixel_idx) / num_pixels;
const int start_weight_idx = true_pixel_idx * 3;
const int start_feat_idx = true_pixel_idx * feat_dim;
const int face_idx = selected_face_idx[true_pixel_idx];
if (face_idx >= 0) {
const int true_face_idx = batch_idx * num_faces + face_idx;
const int start_image_idx = true_face_idx * 6;
const int start_features_idx = true_face_idx * 3 * feat_dim;
// gradient of face_features
#pragma unroll
for (int ii = 0; ii < 3; ii++) {
scalar_t w = output_weights[start_weight_idx + ii];
int pointshift = start_features_idx + ii * feat_dim;
for (int feat_idx = 0; feat_idx < feat_dim; feat_idx++) {
int colorshift = pointshift + feat_idx;
// this should be atomic operation
scalar_t *addr = grad_face_features + colorshift;
scalar_t val = grad_interpolated_features[start_feat_idx + feat_idx] * w;
atomicAdd(addr, val);
}
}
// gradient of points
// here, we calculate dl/dp
// dl/dp = dldI * dI/dp
// dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp
const scalar_t ax = face_vertices_image[start_image_idx + 0];
const scalar_t ay = face_vertices_image[start_image_idx + 1];
const scalar_t bx = face_vertices_image[start_image_idx + 2];
const scalar_t by = face_vertices_image[start_image_idx + 3];
const scalar_t cx = face_vertices_image[start_image_idx + 4];
const scalar_t cy = face_vertices_image[start_image_idx + 5];
const scalar_t aw = output_weights[start_weight_idx + 0];
const scalar_t bw = output_weights[start_weight_idx + 1];
const scalar_t cw = output_weights[start_weight_idx + 2];
const scalar_t x0 = aw * ax + bw * bx + cw * cx;
const scalar_t y0 = aw * ay + bw * by + cw * cy;
const scalar_t m = bx - ax;
const scalar_t p = by - ay;
const scalar_t n = cx - ax;
const scalar_t q = cy - ay;
const scalar_t s = x0 - ax;
const scalar_t t = y0 - ay;
// m * w1 + n * w2 = s
// p * w1 + q * w2 = t
// w1 = (sq - nt) / (mq - np)
// w2 = (mt - sp) / (mq - np)
const scalar_t k1 = s * q - n * t;
const scalar_t k2 = m * t - s * p;
scalar_t k3 = m * q - n * p;
k3 += copysign(static_cast<double>(eps), static_cast<double>(k3));
const scalar_t dk1dm = 0;
const scalar_t dk1dn = -t;
const scalar_t dk1dp = 0;
const scalar_t dk1dq = s;
const scalar_t dk1ds = q;
const scalar_t dk1dt = -n;
const scalar_t dk2dm = t;
const scalar_t dk2dn = 0;
const scalar_t dk2dp = -s;
const scalar_t dk2dq = 0;
const scalar_t dk2ds = -p;
const scalar_t dk2dt = m;
const scalar_t dk3dm = q;
const scalar_t dk3dn = -p;
const scalar_t dk3dp = -n;
const scalar_t dk3dq = m;
const scalar_t dk3ds = 0;
const scalar_t dk3dt = 0;
// w1 = k1 / k3
// w2 = k2 / k3
// we need divide k3 ^ 2
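// Quotient rule: d(k1 / k3) = (dk1 * k3 - dk3 * k1) / k3^2, and likewise for
// k2 / k3. Only the numerators are formed here; the common 1 / k3^2 factor is
// folded into dldI together with the upstream gradient further below.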
const scalar_t dw1dm = dk1dm * k3 - dk3dm * k1;
const scalar_t dw1dn = dk1dn * k3 - dk3dn * k1;
const scalar_t dw1dp = dk1dp * k3 - dk3dp * k1;
const scalar_t dw1dq = dk1dq * k3 - dk3dq * k1;
const scalar_t dw1ds = dk1ds * k3 - dk3ds * k1;
const scalar_t dw1dt = dk1dt * k3 - dk3dt * k1;
const scalar_t dw2dm = dk2dm * k3 - dk3dm * k2;
const scalar_t dw2dn = dk2dn * k3 - dk3dn * k2;
const scalar_t dw2dp = dk2dp * k3 - dk3dp * k2;
const scalar_t dw2dq = dk2dq * k3 - dk3dq * k2;
const scalar_t dw2ds = dk2ds * k3 - dk3ds * k2;
const scalar_t dw2dt = dk2dt * k3 - dk3dt * k2;
const scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds);
const scalar_t dw1day = -(dw1dp + dw1dq + dw1dt);
const scalar_t dw1dbx = dw1dm;
const scalar_t dw1dby = dw1dp;
const scalar_t dw1dcx = dw1dn;
const scalar_t dw1dcy = dw1dq;
const scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds);
const scalar_t dw2day = -(dw2dp + dw2dq + dw2dt);
const scalar_t dw2dbx = dw2dm;
const scalar_t dw2dby = dw2dp;
const scalar_t dw2dcx = dw2dn;
const scalar_t dw2dcy = dw2dq;
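// Since the barycentric weights sum to one, the interpolated feature is
// I = c0 + (c1 - c0) * w1 + (c2 - c0) * w2, which is why only dw1/d(vertex) and
// dw2/d(vertex) are needed in the loop below.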
for (int feat_idx = 0; feat_idx < feat_dim; feat_idx++) {
const scalar_t c0 = face_features[start_features_idx + feat_idx];
const scalar_t c1 = face_features[start_features_idx + feat_dim + feat_idx];
const scalar_t c2 = face_features[start_features_idx + feat_dim + feat_dim + feat_idx];
const scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax;
const scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day;
const scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * dw2dbx;
const scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby;
const scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx;
const scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy;
const scalar_t dldI = grad_interpolated_features[start_feat_idx + feat_idx] / (k3 * k3);
atomicAdd(grad_face_vertices_image + start_image_idx + 0, dldI * dIdax);
atomicAdd(grad_face_vertices_image + start_image_idx + 1, dldI * dIday);
atomicAdd(grad_face_vertices_image + start_image_idx + 2, dldI * dIdbx);
atomicAdd(grad_face_vertices_image + start_image_idx + 3, dldI * dIdby);
atomicAdd(grad_face_vertices_image + start_image_idx + 4, dldI * dIdcx);
atomicAdd(grad_face_vertices_image + start_image_idx + 5, dldI * dIdcy);
}
}
}
}
void rasterize_backward_cuda_impl(
const at::Tensor grad_interpolated_features,
const at::Tensor interpolated_features,
const at::Tensor selected_face_idx,
const at::Tensor output_weights,
const at::Tensor face_vertices_image,
const at::Tensor face_features,
at::Tensor grad_face_vertices_image,
at::Tensor grad_face_features,
const float eps) {
const int batch_size = grad_interpolated_features.size(0);
const int height = grad_interpolated_features.size(1);
const int width = grad_interpolated_features.size(2);
const int feat_dim = grad_interpolated_features.size(3);
const int num_faces = grad_face_vertices_image.size(1);
// for bxhxw image size
const int threads = 512;
const int total_num_pixels = batch_size * height * width;
const int blocks = total_num_pixels / threads;
// we exchange block and thread!
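// The kernel uses a grid-stride loop, so the truncating division above still lets
// every pixel be visited as long as at least one block is launched.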
AT_DISPATCH_FLOATING_TYPES(grad_interpolated_features.scalar_type(),
"rasterize_backward_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(grad_interpolated_features));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( rasterize_backward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
grad_interpolated_features.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
output_weights.data_ptr<scalar_t>(),
face_vertices_image.data_ptr<scalar_t>(),
face_features.data_ptr<scalar_t>(),
grad_face_vertices_image.data_ptr<scalar_t>(),
grad_face_features.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, feat_dim, eps);
AT_CUDA_CHECK(hipGetLastError());
}));
}
} // namespace kaolin
#undef PRIVATE_CASE_TYPE_AND_VAL
#undef DISPATCH_INPUT_TYPES
| ef6de077e1b59bdbf157112d148d038337b4f04c.cu | // Copyright (c) 2019,20-21-22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#include <c10/cuda/CUDAGuard.h>
#include "../../utils.h"
#define PRIVATE_CASE_TYPE_AND_VAL(ENUM_TYPE, TYPE, TYPE_NAME, VAL, ...) \
case ENUM_TYPE: { \
using TYPE_NAME = TYPE; \
const int block_size = VAL; \
return __VA_ARGS__(); \
}
#define DISPATCH_INPUT_TYPES(TYPE, TYPE_NAME, SCOPE_NAME, ...) \
[&] { \
switch(TYPE) \
{ \
PRIVATE_CASE_TYPE_AND_VAL(at::ScalarType::Float, float, TYPE_NAME, 1024, __VA_ARGS__) \
PRIVATE_CASE_TYPE_AND_VAL(at::ScalarType::Double, double, TYPE_NAME, 512, __VA_ARGS__) \
default: \
AT_ERROR(#SCOPE_NAME, " not implemented for '", toString(TYPE), "'"); \
} \
}()
namespace kaolin {
template<typename scalar_t, int BLOCK_SIZE>
__global__ void packed_rasterize_forward_cuda_kernel(
const scalar_t* __restrict__ face_vertices_z,
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_bboxes,
const scalar_t* __restrict__ face_features,
const int64_t* __restrict__ first_idx_face_per_mesh,
int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ output_weights,
scalar_t* __restrict__ interpolated_features,
const int batch_size,
const int height,
const int width,
const int num_faces,
const int num_features,
const float multiplier,
const float eps) {
__shared__ scalar_t shm_pointsbbox[BLOCK_SIZE][4];
for (int bidx = blockIdx.y; bidx < batch_size; bidx += gridDim.y) {
for (int start_pixel_idx = blockIdx.x * blockDim.x;
start_pixel_idx < width * height;
start_pixel_idx += gridDim.x * blockDim.x) {
const int pixel_idx = start_pixel_idx + threadIdx.x;
const int wididx = pixel_idx % width;
const int heiidx = (pixel_idx - wididx) / width;
const int first_id_faces = first_idx_face_per_mesh[bidx];
const int last_id_faces = first_idx_face_per_mesh[bidx + 1];
scalar_t max_z0 = -INFINITY;
int max_face_idx = -1;
scalar_t max_w0 = 0.;
scalar_t max_w1 = 0.;
scalar_t max_w2 = 0.;
bool is_active_pixel = heiidx < height;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + pixel_idx;
const int totalidx3 = totalidx1 * 3;
const int totalidxd = totalidx1 * num_features;
// pixel coordinate
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
for (int start_face_idx = first_id_faces;
start_face_idx < last_id_faces;
start_face_idx += BLOCK_SIZE) {
const int remaining_faces = last_id_faces - start_face_idx;
const int num_faces_this_iter = remaining_faces > BLOCK_SIZE ? BLOCK_SIZE : remaining_faces;
__syncthreads();
#pragma unroll
for (int ii = 0; ii < 4; ii++) {
const int _start_idx = start_face_idx * 4 + threadIdx.x + ii * blockDim.x;
if (_start_idx < (last_id_faces * 4)) {
shm_pointsbbox[((threadIdx.x - (threadIdx.x % 4) + ii * blockDim.x) / 4)][threadIdx.x % 4] = \
face_bboxes[_start_idx];
}
}
__syncthreads();
if (!(is_active_pixel)) {
continue;
}
for (int ii = 0; ii < num_faces_this_iter; ii++) {
int face_idx = ii + start_face_idx;
// This is a bounding box of the face
const scalar_t xmin = shm_pointsbbox[ii][0];
const scalar_t ymin = shm_pointsbbox[ii][1];
const scalar_t xmax = shm_pointsbbox[ii][2];
const scalar_t ymax = shm_pointsbbox[ii][3];
// The pixel doesn't lie in the bounding box
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
const int shift1 = face_idx;
const int shift3 = shift1 * 3;
const int shift6 = shift1 * 6;
// if this pixel is covered by this face, then we check its depth and weights
const scalar_t ax = face_vertices_image[shift6 + 0];
const scalar_t ay = face_vertices_image[shift6 + 1];
const scalar_t bx = face_vertices_image[shift6 + 2];
const scalar_t by = face_vertices_image[shift6 + 3];
const scalar_t cx = face_vertices_image[shift6 + 4];
const scalar_t cy = face_vertices_image[shift6 + 5];
const scalar_t a_edge_x = ax - x0;
const scalar_t a_edge_y = ay - y0;
const scalar_t b_edge_x = bx - x0;
const scalar_t b_edge_y = by - y0;
const scalar_t c_edge_x = cx - x0;
const scalar_t c_edge_y = cy - y0;
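// Edge-function test: each w below is twice the signed area of the sub-triangle
// spanned by the pixel and one edge; normalizing by their sum yields barycentric
// weights, and a negative weight rejects the pixel as lying outside the face.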
scalar_t w0 = b_edge_x * c_edge_y - b_edge_y * c_edge_x;
scalar_t w1 = c_edge_x * a_edge_y - c_edge_y * a_edge_x;
scalar_t w2 = a_edge_x * b_edge_y - a_edge_y * b_edge_x;
scalar_t norm = w0 + w1 + w2;
norm += copysign(static_cast<double>(eps),
static_cast<double>(norm));
w0 /= norm;
w1 /= norm;
w2 /= norm;
// The pixel doesn't lie in the triangle
if (w0 < 0. || w1 < 0. || w2 < 0.) {
continue;
}
// if it is perspective, then this way has a little error
// because face plane may not be parallel to the image plane
// but let's ignore it first
const scalar_t az = face_vertices_z[shift3 + 0];
const scalar_t bz = face_vertices_z[shift3 + 1];
const scalar_t cz = face_vertices_z[shift3 + 2];
const scalar_t z0 = w0 * az + w1 * bz + w2 * cz;
// The intersection is not the closest from the camera
if (z0 <= max_z0) {
continue;
}
max_z0 = z0;
max_face_idx = face_idx;
max_w0 = w0;
max_w1 = w1;
max_w2 = w2;
}
}
if (max_face_idx > -1) {
// index
selected_face_idx[totalidx1] = max_face_idx - first_id_faces;
const int shift3d = max_face_idx * 3 * num_features;
// weights
output_weights[totalidx3 + 0] = max_w0;
output_weights[totalidx3 + 1] = max_w1;
output_weights[totalidx3 + 2] = max_w2;
// color
for (int d = 0; d < num_features; d++) {
const scalar_t r0 = face_features[shift3d + d];
const scalar_t r1 = face_features[shift3d + num_features + d];
const scalar_t r2 = face_features[shift3d + num_features + num_features + d];
interpolated_features[totalidxd + d] = max_w0 * r0 + max_w1 * r1 + max_w2 * r2;
}
}
}
}
}
void packed_rasterize_forward_cuda_impl(
const at::Tensor face_vertices_z,
const at::Tensor face_vertices_image,
const at::Tensor face_bboxes,
const at::Tensor face_features,
const at::Tensor num_face_per_mesh,
at::Tensor selected_face_idx,
at::Tensor output_weights,
at::Tensor interpolated_features,
const float multiplier,
const float eps) {
const int num_faces = face_vertices_z.size(1);
const int batch_size = interpolated_features.size(0);
const int height = interpolated_features.size(1);
const int width = interpolated_features.size(2);
const int num_features = interpolated_features.size(3);
const int num_pixels = height * width;
DISPATCH_INPUT_TYPES(face_vertices_z.scalar_type(), scalar_t,
"packed_rasterize_forward_cuda", [&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(face_vertices_z));
auto stream = at::cuda::getCurrentCUDAStream();
const int num_blocks_per_sample = num_pixels / block_size + 1;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(num_blocks_per_sample, 1, 1);
packed_rasterize_forward_cuda_kernel<scalar_t, block_size><<<blocks, threads, 0, stream>>>(
face_vertices_z.data_ptr<scalar_t>(),
face_vertices_image.data_ptr<scalar_t>(),
face_bboxes.data_ptr<scalar_t>(),
face_features.data_ptr<scalar_t>(),
num_face_per_mesh.data_ptr<int64_t>(),
selected_face_idx.data_ptr<int64_t>(),
output_weights.data_ptr<scalar_t>(),
interpolated_features.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, num_features,
multiplier, eps);
AT_CUDA_CHECK(cudaGetLastError());
});
}
template<typename scalar_t>
__global__ void rasterize_backward_cuda_kernel(
const scalar_t* __restrict__ grad_interpolated_features,
const int64_t* __restrict__ selected_face_idx,
const scalar_t* __restrict__ output_weights,
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_features,
scalar_t* __restrict__ grad_face_vertices_image,
scalar_t* __restrict__ grad_face_features,
const int batch_size,
const int height,
const int width,
const int num_faces,
const int feat_dim,
const float eps) {
const int num_pixels = height * width;
// Each grid-stride iteration handles one pixel (looping over all of its features)
for (int true_pixel_idx = blockIdx.x * blockDim.x + threadIdx.x;
true_pixel_idx < batch_size * num_pixels;
true_pixel_idx += blockDim.x * gridDim.x) {
const int pixel_idx = true_pixel_idx % num_pixels;
const int batch_idx = (true_pixel_idx - pixel_idx) / num_pixels;
const int start_weight_idx = true_pixel_idx * 3;
const int start_feat_idx = true_pixel_idx * feat_dim;
const int face_idx = selected_face_idx[true_pixel_idx];
if (face_idx >= 0) {
const int true_face_idx = batch_idx * num_faces + face_idx;
const int start_image_idx = true_face_idx * 6;
const int start_features_idx = true_face_idx * 3 * feat_dim;
// gradient of face_features
#pragma unroll
for (int ii = 0; ii < 3; ii++) {
scalar_t w = output_weights[start_weight_idx + ii];
int pointshift = start_features_idx + ii * feat_dim;
for (int feat_idx = 0; feat_idx < feat_dim; feat_idx++) {
int colorshift = pointshift + feat_idx;
// this must be an atomic add: several pixels may select the same face
scalar_t *addr = grad_face_features + colorshift;
scalar_t val = grad_interpolated_features[start_feat_idx + feat_idx] * w;
atomicAdd(addr, val);
}
}
// gradient of points
// here, we calculate dl/dp
// dl/dp = dldI * dI/dp
// dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp
const scalar_t ax = face_vertices_image[start_image_idx + 0];
const scalar_t ay = face_vertices_image[start_image_idx + 1];
const scalar_t bx = face_vertices_image[start_image_idx + 2];
const scalar_t by = face_vertices_image[start_image_idx + 3];
const scalar_t cx = face_vertices_image[start_image_idx + 4];
const scalar_t cy = face_vertices_image[start_image_idx + 5];
const scalar_t aw = output_weights[start_weight_idx + 0];
const scalar_t bw = output_weights[start_weight_idx + 1];
const scalar_t cw = output_weights[start_weight_idx + 2];
const scalar_t x0 = aw * ax + bw * bx + cw * cx;
const scalar_t y0 = aw * ay + bw * by + cw * cy;
const scalar_t m = bx - ax;
const scalar_t p = by - ay;
const scalar_t n = cx - ax;
const scalar_t q = cy - ay;
const scalar_t s = x0 - ax;
const scalar_t t = y0 - ay;
// m * w1 + n * w2 = s
// p * w1 + q * w2 = t
// w1 = (sq - nt) / (mq - np)
// w2 = (mt - sp) / (mq - np)
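// i.e. Cramer's rule on the 2x2 system above, with k3 = mq - np as its determinant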
const scalar_t k1 = s * q - n * t;
const scalar_t k2 = m * t - s * p;
scalar_t k3 = m * q - n * p;
k3 += copysign(static_cast<double>(eps), static_cast<double>(k3));
const scalar_t dk1dm = 0;
const scalar_t dk1dn = -t;
const scalar_t dk1dp = 0;
const scalar_t dk1dq = s;
const scalar_t dk1ds = q;
const scalar_t dk1dt = -n;
const scalar_t dk2dm = t;
const scalar_t dk2dn = 0;
const scalar_t dk2dp = -s;
const scalar_t dk2dq = 0;
const scalar_t dk2ds = -p;
const scalar_t dk2dt = m;
const scalar_t dk3dm = q;
const scalar_t dk3dn = -p;
const scalar_t dk3dp = -n;
const scalar_t dk3dq = m;
const scalar_t dk3ds = 0;
const scalar_t dk3dt = 0;
// w1 = k1 / k3
// w2 = k2 / k3
// the derivatives below still need to be divided by k3^2; that common factor is
// deferred and folded into dldI when the gradients are accumulated further down
const scalar_t dw1dm = dk1dm * k3 - dk3dm * k1;
const scalar_t dw1dn = dk1dn * k3 - dk3dn * k1;
const scalar_t dw1dp = dk1dp * k3 - dk3dp * k1;
const scalar_t dw1dq = dk1dq * k3 - dk3dq * k1;
const scalar_t dw1ds = dk1ds * k3 - dk3ds * k1;
const scalar_t dw1dt = dk1dt * k3 - dk3dt * k1;
const scalar_t dw2dm = dk2dm * k3 - dk3dm * k2;
const scalar_t dw2dn = dk2dn * k3 - dk3dn * k2;
const scalar_t dw2dp = dk2dp * k3 - dk3dp * k2;
const scalar_t dw2dq = dk2dq * k3 - dk3dq * k2;
const scalar_t dw2ds = dk2ds * k3 - dk3ds * k2;
const scalar_t dw2dt = dk2dt * k3 - dk3dt * k2;
const scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds);
const scalar_t dw1day = -(dw1dp + dw1dq + dw1dt);
const scalar_t dw1dbx = dw1dm;
const scalar_t dw1dby = dw1dp;
const scalar_t dw1dcx = dw1dn;
const scalar_t dw1dcy = dw1dq;
const scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds);
const scalar_t dw2day = -(dw2dp + dw2dq + dw2dt);
const scalar_t dw2dbx = dw2dm;
const scalar_t dw2dby = dw2dp;
const scalar_t dw2dcx = dw2dn;
const scalar_t dw2dcy = dw2dq;
for (int feat_idx = 0; feat_idx < feat_dim; feat_idx++) {
const scalar_t c0 = face_features[start_features_idx + feat_idx];
const scalar_t c1 = face_features[start_features_idx + feat_dim + feat_idx];
const scalar_t c2 = face_features[start_features_idx + feat_dim + feat_dim + feat_idx];
const scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax;
const scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day;
const scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * dw2dbx;
const scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby;
const scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx;
const scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy;
const scalar_t dldI = grad_interpolated_features[start_feat_idx + feat_idx] / (k3 * k3);
atomicAdd(grad_face_vertices_image + start_image_idx + 0, dldI * dIdax);
atomicAdd(grad_face_vertices_image + start_image_idx + 1, dldI * dIday);
atomicAdd(grad_face_vertices_image + start_image_idx + 2, dldI * dIdbx);
atomicAdd(grad_face_vertices_image + start_image_idx + 3, dldI * dIdby);
atomicAdd(grad_face_vertices_image + start_image_idx + 4, dldI * dIdcx);
atomicAdd(grad_face_vertices_image + start_image_idx + 5, dldI * dIdcy);
}
}
}
}
void rasterize_backward_cuda_impl(
const at::Tensor grad_interpolated_features,
const at::Tensor interpolated_features,
const at::Tensor selected_face_idx,
const at::Tensor output_weights,
const at::Tensor face_vertices_image,
const at::Tensor face_features,
at::Tensor grad_face_vertices_image,
at::Tensor grad_face_features,
const float eps) {
const int batch_size = grad_interpolated_features.size(0);
const int height = grad_interpolated_features.size(1);
const int width = grad_interpolated_features.size(2);
const int feat_dim = grad_interpolated_features.size(3);
const int num_faces = grad_face_vertices_image.size(1);
// for bxhxw image size
const int threads = 512;
const int total_num_pixels = batch_size * height * width;
const int blocks = total_num_pixels / threads;
// we exchange block and thread!
AT_DISPATCH_FLOATING_TYPES(grad_interpolated_features.scalar_type(),
"rasterize_backward_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(grad_interpolated_features));
auto stream = at::cuda::getCurrentCUDAStream();
rasterize_backward_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
grad_interpolated_features.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
output_weights.data_ptr<scalar_t>(),
face_vertices_image.data_ptr<scalar_t>(),
face_features.data_ptr<scalar_t>(),
grad_face_vertices_image.data_ptr<scalar_t>(),
grad_face_features.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, feat_dim, eps);
AT_CUDA_CHECK(cudaGetLastError());
}));
}
} // namespace kaolin
#undef PRIVATE_CASE_TYPE_AND_VAL
#undef DISPATCH_INPUT_TYPES
|
6b04bdc9fd0ebe13a7ef1c94e39c54f11af65718.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Reference
__global__ void smooth(float * v_new, const float * v) {
int myIdx = threadIdx.x * gridDim.x + blockIdx.x;
int numThreads = blockDim.x * gridDim.x;
int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1;
int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1;
float myElt = v[myIdx];
float myLeftElt = v[myLeftIdx];
float myRightElt = v[myRightIdx];
v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}
// Your code
__global__ void smooth_shared(float * v_new, const float * v) {
extern __shared__ float s[];
// TODO: Complete the rest of this function
int myIdx = blockIdx.x * blockDim.x + threadIdx.x;
int numThreads = blockDim.x * gridDim.x;
// Stage this block's elements into shared memory.
s[threadIdx.x] = v[myIdx];
__syncthreads();
float myElt = s[threadIdx.x];
// Interior threads read neighbors from shared memory; the block-edge halo falls back to global memory.
float myLeftElt = (threadIdx.x == 0) ? ((myIdx == 0) ? v[0] : v[myIdx-1]) : s[threadIdx.x-1];
float myRightElt = (threadIdx.x == (blockDim.x-1)) ? ((myIdx == (numThreads - 1)) ? v[numThreads - 1] : v[myIdx+1]) : s[threadIdx.x+1];
v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 4096;
const int BLOCK_SIZE = 256;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float h_cmp[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
float h_out_shared[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX;
}
for(int i = 0; i < ARRAY_SIZE; i++) {
h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i-1] +
0.50f * h_in[i] +
0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i+1]);
}
// declare GPU memory pointers
float * d_in, * d_out, * d_out_shared;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMalloc((void **) &d_out_shared, ARRAY_BYTES);
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// launch the kernel
hipLaunchKernelGGL(( smooth), dim3(ARRAY_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_out, d_in);
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( smooth_shared), dim3(ARRAY_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), (BLOCK_SIZE + 2) * sizeof(float), 0, d_out_shared, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
// hipEventSynchronize(stop);
// float elapsedTime;
// hipEventElapsedTime(&elapsedTime, start, stop);
// copy back the result from GPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, hipMemcpyDeviceToHost);
// testing for correctness
compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
hipFree(d_out_shared);
}
| 6b04bdc9fd0ebe13a7ef1c94e39c54f11af65718.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Reference
__global__ void smooth(float * v_new, const float * v) {
int myIdx = threadIdx.x * gridDim.x + blockIdx.x;
int numThreads = blockDim.x * gridDim.x;
int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1;
int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1;
float myElt = v[myIdx];
float myLeftElt = v[myLeftIdx];
float myRightElt = v[myRightIdx];
v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}
// Your code
__global__ void smooth_shared(float * v_new, const float * v) {
extern __shared__ float s[];
// TODO: Complete the rest of this function
int myIdx = blockIdx.x * blockDim.x + threadIdx.x;
int numThreads = blockDim.x * gridDim.x;
// Stage this block's elements into shared memory.
s[threadIdx.x] = v[myIdx];
__syncthreads();
float myElt = s[threadIdx.x];
// Interior threads read neighbors from shared memory; the block-edge halo falls back to global memory.
float myLeftElt = (threadIdx.x == 0) ? ((myIdx == 0) ? v[0] : v[myIdx-1]) : s[threadIdx.x-1];
float myRightElt = (threadIdx.x == (blockDim.x-1)) ? ((myIdx == (numThreads - 1)) ? v[numThreads - 1] : v[myIdx+1]) : s[threadIdx.x+1];
v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 4096;
const int BLOCK_SIZE = 256;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float h_cmp[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
float h_out_shared[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX;
}
for(int i = 0; i < ARRAY_SIZE; i++) {
h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i-1] +
0.50f * h_in[i] +
0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i+1]);
}
// declare GPU memory pointers
float * d_in, * d_out, * d_out_shared;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMalloc((void **) &d_out_shared, ARRAY_BYTES);
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// launch the kernel
smooth<<<ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE>>>(d_out, d_in);
GpuTimer timer;
timer.Start();
smooth_shared<<<ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float)>>>(d_out_shared, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
// cudaEventSynchronize(stop);
// float elapsedTime;
// cudaEventElapsedTime(&elapsedTime, start, stop);
// copy back the result from GPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// testing for correctness
compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_out_shared);
}
|
cdb4c73f448527f095d2827ba0b2a9c115b390fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <algorithm>
#include <chrono>
using namespace std;
#include "cuda_smith_waterman.h"
/*
* You can add helper functions and variables as you wish.
*/
#define max(a, b) (a > b ? a : b)
__global__ void cuda_sw(char *a, char *b, int a_len, int b_len, int *d_scores, int *max_scores, int y, int* d_scores1, int* d_scores2) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int tn = blockDim.x * gridDim.x;
int max_score = 0;
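// Threads sweep anti-diagonal y of the DP matrix: cell (i, j) with column i = x in b
// and row j = y - x + 1 in a; d_scores1 and d_scores2 hold diagonals y-1 and y-2.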
for (int x = tid; x < b_len + 1; x += tn) {
int j = y - x + 1;
int i = x;
int s = 0;
if (i >= 1 && j >= 1 && i <= b_len && j <= a_len) {
if (y >= 2) {
int ad = sub_mat(a[j - 1], b[i - 1]);
s = max(0, d_scores2[x-1] + ad);
}
s = max(s, d_scores1[x-1] - GAP);
s = max(s, d_scores1[x] - GAP);
max_score = max(max_score, s);
}
d_scores[x] = s;
}
max_scores[tid] = max(max_scores[tid], max_score);
}
int smith_waterman(int blocks_per_grid, int threads_per_block, char *_a, char *_b, int _a_len, int _b_len) {
dim3 blocks(blocks_per_grid);
dim3 threads(threads_per_block);
char *a;
char *b;
int a_len;
int b_len;
int *d_scores, *d_scores1, *d_scores2;
int max_score = 0;
int tn = blocks_per_grid * threads_per_block;
int *d_max_scores;
char *d_a, *d_b;
a = _a, b = _b, a_len = _a_len, b_len = _b_len;
if (b_len < a_len) {
a_len = _b_len;
b_len = _a_len;
a = _b;
b = _a;
}
hipMalloc(&d_scores, sizeof(int) * (b_len + 1));
hipMalloc(&d_scores1, sizeof(int) * (b_len + 1));
hipMalloc(&d_scores2, sizeof(int) * (b_len + 1));
hipMalloc(&d_max_scores, sizeof(int) * tn);
hipMalloc(&d_a, sizeof(char) * a_len);
hipMalloc(&d_b, sizeof(char) * b_len);
// Copy exactly a_len / b_len chars; sizeof(int) here would read past the host buffers.
hipMemcpy(d_a, a, sizeof(char) * a_len, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(char) * b_len, hipMemcpyHostToDevice);
hipMemset(d_scores, 0, sizeof(int) * (b_len+1));
hipMemset(d_scores1, 0, sizeof(int) * (b_len+1));
hipMemset(d_scores2, 0, sizeof(int) * (b_len+1));
hipMemset(d_max_scores, 0, sizeof(int) * tn);
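// Sweep the anti-diagonals; after each launch the three buffers are rotated so that
// d_scores1 always holds diagonal y-1 and d_scores2 diagonal y-2 for the next y.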
for (int y = 0; y < a_len + b_len + 1; y++) {
hipLaunchKernelGGL(( cuda_sw) , dim3(blocks), dim3(threads) , 0, 0, d_a, d_b, a_len, b_len, d_scores, d_max_scores, y, d_scores1, d_scores2);
int* t = d_scores2;
d_scores2 = d_scores1;
d_scores1 = d_scores;
d_scores = t;
}
int *h_max_scores = new int[tn];
hipMemcpy(h_max_scores, d_max_scores, sizeof(int) * tn, hipMemcpyDeviceToHost);
for (int i = 0; i < tn; i++) {
max_score = max(max_score, h_max_scores[i]);
}
return max_score;
}
| cdb4c73f448527f095d2827ba0b2a9c115b390fe.cu | #include <iostream>
#include <string>
#include <algorithm>
#include <chrono>
using namespace std;
#include "cuda_smith_waterman.h"
/*
* You can add helper functions and variables as you wish.
*/
#define max(a, b) (a > b ? a : b)
__global__ void cuda_sw(char *a, char *b, int a_len, int b_len, int *d_scores, int *max_scores, int y, int* d_scores1, int* d_scores2) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int tn = blockDim.x * gridDim.x;
int max_score = 0;
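// Threads sweep anti-diagonal y of the DP matrix: cell (i, j) with column i = x in b
// and row j = y - x + 1 in a; d_scores1 and d_scores2 hold diagonals y-1 and y-2.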
for (int x = tid; x < b_len + 1; x += tn) {
int j = y - x + 1;
int i = x;
int s = 0;
if (i >= 1 && j >= 1 && i <= b_len && j <= a_len) {
if (y >= 2) {
int ad = sub_mat(a[j - 1], b[i - 1]);
s = max(0, d_scores2[x-1] + ad);
}
s = max(s, d_scores1[x-1] - GAP);
s = max(s, d_scores1[x] - GAP);
max_score = max(max_score, s);
}
d_scores[x] = s;
}
max_scores[tid] = max(max_scores[tid], max_score);
}
int smith_waterman(int blocks_per_grid, int threads_per_block, char *_a, char *_b, int _a_len, int _b_len) {
dim3 blocks(blocks_per_grid);
dim3 threads(threads_per_block);
char *a;
char *b;
int a_len;
int b_len;
int *d_scores, *d_scores1, *d_scores2;
int max_score = 0;
int tn = blocks_per_grid * threads_per_block;
int *d_max_scores;
char *d_a, *d_b;
a = _a, b = _b, a_len = _a_len, b_len = _b_len;
if (b_len < a_len) {
a_len = _b_len;
b_len = _a_len;
a = _b;
b = _a;
}
cudaMalloc(&d_scores, sizeof(int) * (b_len + 1));
cudaMalloc(&d_scores1, sizeof(int) * (b_len + 1));
cudaMalloc(&d_scores2, sizeof(int) * (b_len + 1));
cudaMalloc(&d_max_scores, sizeof(int) * tn);
cudaMalloc(&d_a, sizeof(char) * a_len);
cudaMalloc(&d_b, sizeof(char) * b_len);
// Copy exactly a_len / b_len chars; sizeof(int) here would read past the host buffers.
cudaMemcpy(d_a, a, sizeof(char) * a_len, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(char) * b_len, cudaMemcpyHostToDevice);
cudaMemset(d_scores, 0, sizeof(int) * (b_len+1));
cudaMemset(d_scores1, 0, sizeof(int) * (b_len+1));
cudaMemset(d_scores2, 0, sizeof(int) * (b_len+1));
cudaMemset(d_max_scores, 0, sizeof(int) * tn);
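// Sweep the anti-diagonals; after each launch the three buffers are rotated so that
// d_scores1 always holds diagonal y-1 and d_scores2 diagonal y-2 for the next y.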
for (int y = 0; y < a_len + b_len + 1; y++) {
cuda_sw <<< blocks, threads >>>(d_a, d_b, a_len, b_len, d_scores, d_max_scores, y, d_scores1, d_scores2);
int* t = d_scores2;
d_scores2 = d_scores1;
d_scores1 = d_scores;
d_scores = t;
}
int *h_max_scores = new int[tn];
cudaMemcpy(h_max_scores, d_max_scores, sizeof(int) * tn, cudaMemcpyDeviceToHost);
for (int i = 0; i < tn; i++) {
max_score = max(max_score, h_max_scores[i]);
}
return max_score;
}
|
284ed1c9ae0e8b54a3fb8dfdbf9e567b56650821.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// clang-format off
#include "fbgemm_gpu/cub_namespace_prefix.cuh"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include "fbgemm_gpu/cub_namespace_postfix.cuh"
// clang-format on
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPGraphsUtils.cuh>
#include <limits>
#include <mutex>
#include "fbgemm_gpu/dispatch_macros.h"
#include "fbgemm_gpu/embedding_common.h"
#include "fbgemm_gpu/fbgemm_cuda_utils.cuh"
#include "fbgemm_gpu/sparse_ops_utils.h"
#include "fbgemm_gpu/split_embeddings_utils.cuh"
constexpr size_t kCacheMaxThreads = 512;
using Tensor = at::Tensor;
using namespace fbgemm_gpu;
namespace {
__forceinline__ __host__ __device__ uint32_t round_up(uint32_t a, uint32_t b) {
return ((a + b - 1) / b) * b;
}
__host__ __device__ inline int32_t unpadded_row_size_in_bytes(
int32_t dim,
SparseType weight_ty) {
if (weight_ty == SparseType::FP32) {
return dim * 4;
}
if (weight_ty == SparseType::FP16) {
return dim * 2;
}
if (weight_ty == SparseType::INT8) {
return dim + 4;
}
if (weight_ty == SparseType::INT4) {
return dim / 2 + 4;
}
if (weight_ty == SparseType::INT2) {
return dim / 4 + 4;
}
return 0;
}
__host__ __device__ inline int32_t padded_row_size_in_bytes(
int32_t dim,
SparseType weight_ty) {
auto r = unpadded_row_size_in_bytes(dim, weight_ty);
return round_up(r, 16);
}
} // namespace
// // TODO: do we care about 64-bit indices? Currently we just ignore.
// __host__ DEVICE_INLINE uint32_t cache_slot(int32_t h_in, int32_t C) {
// // MurmorHash3 32-bit mixing function.
// uint32_t h = (uint32_t)h_in;
// h ^= h >> 16;
// h *= 0x85ebca6b;
// h ^= h >> 13;
// h *= 0xc2b2ae35;
// h ^= h >> 16;
// //
// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
// return ((uint64_t)h * (uint64_t)C) >> 32;
// }
__host__ DEVICE_INLINE uint32_t cache_slot(int64_t h_in, int32_t C) {
// MurmurHash3 64-bit mixing function.
uint64_t h = (uint64_t)h_in;
h ^= h >> 33;
h *= 0xff51afd7ed558ccd;
h ^= h >> 33;
h *= 0xc4ceb9fe1a85ec53;
h ^= h >> 33;
return h % (uint32_t)C;
}
int64_t host_lxu_cache_slot(int64_t h_in, int64_t C) {
return static_cast<int64_t>(cache_slot(h_in, static_cast<int32_t>(C)));
}
constexpr int32_t kCacheLocationMissing = -1;
constexpr int64_t kCacheStateInvalid = -1;
template <typename emb_t, typename cache_t>
__global__ __launch_bounds__(kMaxThreads) void lxu_cache_flush_kernel(
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args) {
int32_t B = lxu_cache_weights.size(0);
int32_t b = blockIdx.x * blockDim.y + threadIdx.y;
if (b >= B) {
return;
}
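// Each cache set owns kWarpSize slots (ways): row b of lxu_cache_weights maps to
// set b / kWarpSize, way b % kWarpSize.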
int32_t slot = b % kWarpSize;
int32_t cache_set = b / kWarpSize;
int64_t current_idx = lxu_cache_state[cache_set][slot];
if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) {
// evict from slot to backing storage
int32_t t_current = cache_index_table_map[current_idx];
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
int32_t D_emb = D_current;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(
&weights[weights_offset_current + idx_current * D_emb + 0],
&lxu_cache_weights[b][0],
D_current,
nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x,
&state);
weight_row.set_stoc_state(&state);
}
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams =
thrust_find_qparams<cache_t>(&lxu_cache_weights[b][0], D_current);
if (threadIdx.x == 0) {
weight_row.store_qparams(qparams);
}
}
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<at::acc_type<cache_t, true>> cache_weights_vec =
weight_row.load(d * 4, qparams);
weight_row.evict(cache_weights_vec, d * 4, qparams);
}
}
}
void lxu_cache_flush_cuda(
Tensor uvm_weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
int64_t total_D,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(uvm_weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(lxu_cache_weights.get_device());
int32_t T = D_offsets.numel() - 1;
int32_t S = lxu_cache_weights.size(0);
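// Each thread loads Vec4T chunks (4 elements), so use roughly (average D) / 4 threads
// along x, capped at kMaxThreads.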
int32_t tx = std::min<int32_t>(total_D / 4 / T, kMaxThreads);
dim3 threads(tx, kMaxThreads / tx);
dim3 blocks(div_round_up(S, kMaxThreads / tx));
DISPATCH_EMB_CACHE_TYPES(
uvm_weights.type(),
lxu_cache_weights.type(),
"lxu_cache_flush_kernel_2",
([&] {
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && std::is_same<emb_t, at::Half>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
hipLaunchKernelGGL(( lxu_cache_flush_kernel<emb_t, cache_t>)
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
uvm_weights
.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
stochastic_rounding,
rng_engine_inputs);
}));
C10_HIP_KERNEL_LAUNCH_CHECK();
return;
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void linearize_cache_indices_kernel(
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets,
at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
linear_cache_indices) {
int32_t T = cache_hash_size_cumsum.size(0) - 1;
int64_t total_cache_hash_size = cache_hash_size_cumsum[T];
int32_t B = (offsets.size(0) - 1) / T;
int32_t b_t = blockIdx.x * blockDim.x + threadIdx.x;
int32_t b = b_t % B;
int32_t t = b_t / B;
bool valid = t < T;
int64_t hash_offset = valid ? cache_hash_size_cumsum[t] : -1;
auto indices_start = valid ? offsets[t * B + b] : -1;
int32_t L = valid ? offsets[t * B + b + 1] - indices_start : 0;
int32_t lane_id = threadIdx.x % kWarpSize;
// hash_offset < 0 for non-caching tables
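// Broadcast each lane's (indices_start, L, hash_offset) in turn so the whole warp
// cooperates on one bag at a time with contiguous, coalesced writes.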
for (int32_t j = 0; j < kWarpSize; ++j) {
auto indices_start_warp = shfl_sync(indices_start, j);
int32_t L_warp = shfl_sync(L, j);
int64_t hash_offset_warp = shfl_sync(hash_offset, j);
if (hash_offset_warp >= 0) {
for (int32_t i = lane_id; i < L_warp; i += kWarpSize) {
auto idx = __ldg(&indices[indices_start_warp + i]);
linear_cache_indices[indices_start_warp + i] = hash_offset_warp + idx;
}
} else {
for (int32_t i = lane_id; i < L_warp; i += kWarpSize) {
linear_cache_indices[indices_start_warp + i] = total_cache_hash_size;
}
}
}
}
Tensor linearize_cache_indices_cuda(
Tensor cache_hash_size_cumsum,
Tensor indices,
Tensor offsets) {
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(indices);
TENSOR_ON_CUDA_GPU(offsets);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(cache_hash_size_cumsum.get_device());
auto T = cache_hash_size_cumsum.size(0) - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B >= 0);
auto linear_cache_indices = at::empty_like(indices);
if (B == 0) {
return linear_cache_indices;
}
AT_DISPATCH_INDEX_TYPES(
indices.scalar_type(), "linearize_cache_indices_kernel", [&]() {
hipLaunchKernelGGL(( linearize_cache_indices_kernel),
dim3(div_round_up(B * T, kMaxThreads)),
dim3(kMaxThreads),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
linear_cache_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return linear_cache_indices;
}
std::tuple<Tensor, Tensor, c10::optional<Tensor>> get_unique_indices_cuda(
Tensor linear_indices,
int64_t max_indices,
bool compute_count) {
TENSOR_ON_CUDA_GPU(linear_indices);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(linear_indices.get_device());
TORCH_CHECK(linear_indices.numel() < std::numeric_limits<int32_t>::max());
int32_t N = linear_indices.numel();
auto sorted_indices = at::empty_like(linear_indices);
auto unique_indices = at::empty_like(linear_indices);
auto unique_indices_length =
at::empty({1}, linear_indices.options().dtype(at::kInt));
c10::optional<Tensor> unique_indices_count = c10::nullopt;
if (compute_count) {
unique_indices_count = at::empty(
{linear_indices.numel()}, linear_indices.options().dtype(at::kInt));
}
AT_DISPATCH_INDEX_TYPES(
linear_indices.scalar_type(), "get_unique_indices_cuda", [&]() {
// sort indices
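// Standard two-pass CUB pattern: the first call with a null workspace only computes
// temp_storage_bytes, the second call performs the sort; the end-bit argument limits
// the radix passes to the significant bits of max_indices.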
size_t temp_storage_bytes_0 = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRadixSort::SortKeys(
nullptr,
temp_storage_bytes_0,
linear_indices.data_ptr<index_t>(),
sorted_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(max_indices + 1)) + 1),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage_0 = at::empty(
{static_cast<index_t>(temp_storage_bytes_0)},
linear_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRadixSort::SortKeys(
temp_storage_0.data_ptr(),
temp_storage_bytes_0,
linear_indices.data_ptr<index_t>(),
sorted_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(max_indices + 1)) + 1),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
// get unique indices
if (compute_count) {
size_t temp_storage_bytes_1 = 0;
AT_CUDA_CHECK(
FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRunLengthEncode::Encode(
nullptr,
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_count->data_ptr<int32_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage_1 = at::empty(
{static_cast<index_t>(temp_storage_bytes_1)},
linear_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(
FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRunLengthEncode::Encode(
temp_storage_1.data_ptr(),
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_count->data_ptr<int32_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
} else {
size_t temp_storage_bytes_1 = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceSelect::Unique(
nullptr,
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage_1 = at::empty(
{static_cast<index_t>(temp_storage_bytes_1)},
linear_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceSelect::Unique(
temp_storage_1.data_ptr(),
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
}
});
return std::make_tuple(
unique_indices, unique_indices_length, unique_indices_count);
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lru_cache_find_uncached_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
unique_indices,
const int32_t* __restrict__ N_unique,
int64_t max_indices,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> cache_sets,
int64_t time_stamp,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits> lru_state) {
int32_t N = unique_indices.size(0);
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= N) {
return;
}
if (n >= *N_unique) {
if (threadIdx.x == 0) {
cache_sets[n] = C; // invalid index, used as sentinel
}
return;
}
int64_t idx = unique_indices[n];
if (idx == max_indices) {
if (threadIdx.x == 0) {
cache_sets[n] = C; // invalid index, used as sentinel
}
return;
}
int32_t cache_set = cache_slot(idx, C);
auto slot = threadIdx.x;
bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx;
if (found) {
// mark it as existing.
cache_sets[n] = C; // invalid index, used as sentinel
// mark it as recently accessed so we don't evict.
lru_state[cache_set][slot] = time_stamp;
}
#ifdef __HIP_PLATFORM_HCC__
// FIXME: __any_sync with mask isn't supported by HIP yet.
// See https://fburl.com/fvy7j0lq for the similar context.
// assert false here with https://fburl.com/pfm7enw2
assert(false);
if (!__any(found)) {
#else
if (!__any_sync(0xFFFFFFFF, found)) {
#endif
if (threadIdx.x == 0) {
cache_sets[n] = cache_set;
}
}
}
std::pair<Tensor, Tensor> lru_cache_find_uncached_cuda(
Tensor unique_indices,
Tensor unique_indices_length,
int64_t max_indices,
Tensor lxu_cache_state,
int64_t time_stamp,
Tensor lru_state) {
TENSOR_ON_CUDA_GPU(unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lru_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(unique_indices.get_device());
auto cache_sets =
empty_like(unique_indices, unique_indices.options().dtype(at::kInt));
int32_t N = unique_indices.numel();
auto sorted_cache_sets = empty_like(cache_sets);
auto cache_set_sorted_unique_indices = empty_like(unique_indices);
AT_DISPATCH_INDEX_TYPES(
unique_indices.scalar_type(), "lru_cache_find_uncached_cuda", [&]() {
// Find uncached indices
hipLaunchKernelGGL(( lru_cache_find_uncached_kernel),
dim3(div_round_up(N, kMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
max_indices,
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
cache_sets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
time_stamp,
lru_state.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
// Sort the cache sets and ids
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
cache_sets.data_ptr<int32_t>(),
sorted_cache_sets.data_ptr<int32_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage = at::empty(
{static_cast<index_t>(temp_storage_bytes)},
unique_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
cache_sets.data_ptr<int32_t>(),
sorted_cache_sets.data_ptr<int32_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
});
return {sorted_cache_sets, cache_set_sorted_unique_indices};
}
template <typename emb_t, typename cache_t>
__global__ __launch_bounds__(kMaxThreads) void lru_cache_insert_kernel(
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_cache_sets,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
int64_t time_stamp,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits> lru_state,
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 || sorted_cache_sets[n - 1] != sorted_cache_sets[n]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int32_t cache_set = sorted_cache_sets[n];
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique && sorted_cache_sets[n + SL] == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t slot_time = lru_state[cache_set][slot];
int64_t costs[1] = {slot_time};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lru_cost = costs[0];
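// After the warp-wide bitonic sort, lanes hold the candidate slots ordered from least
// to most recently used, so lane l supplies the l-th eviction candidate below.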
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lru_cost = shfl_sync(sorted_lru_cost, l);
if (insert_current_lru_cost == time_stamp) {
return;
}
int64_t insert_idx = cache_set_sorted_indices[n + l];
int32_t t_insert = cache_index_table_map[insert_idx];
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
// ensure that threadIdx.x is the only thread reading/writing to
// lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
// not empty
if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) {
// evict from slot to backing storage
int32_t t_current = cache_index_table_map[current_idx];
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
int32_t D_emb = D_current;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_current + idx_current * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_current,
nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
(blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x) *
kWarpSize +
l,
&state);
weight_row.set_stoc_state(&state);
}
float2 qparams;
at::acc_type<cache_t, true> local_min =
std::numeric_limits<at::acc_type<cache_t, true>>::max();
at::acc_type<cache_t, true> local_max =
std::numeric_limits<at::acc_type<cache_t, true>>::lowest();
if (std::is_same<emb_t, uint8_t>::value) {
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec =
weight_row.load(d * 4, qparams); // qparams not used
local_max = max(local_max, vec4_max(cache_weights_vec));
local_min = min(local_min, vec4_min(cache_weights_vec));
}
qparams = warp_find_qparams(local_min, local_max);
if (threadIdx.x == 0) {
weight_row.store_qparams(qparams);
}
}
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams);
weight_row.evict(
cache_weights_vec, d * 4, qparams); // FP32 -> FP16/FP32
}
}
int32_t D_emb = D_insert;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
// insert into cache
auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_insert,
nullptr);
auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
nullptr,
D_insert,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row_emb.load_qparams();
}
for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) {
auto row = weight_row_emb.load(d * 4, qparams);
weight_row_cache.store(row, d * 4, qparams);
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
lru_state[cache_set][insert_slot] = time_stamp;
}
}
}
void lru_cache_insert_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
DISPATCH_EMB_CACHE_TYPES(
weights.type(),
lxu_cache_weights.type(),
"lru_cache_insert_kernel_2",
([&] {
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
hipLaunchKernelGGL(( lru_cache_insert_kernel<emb_t, cache_t>)
, dim3(div_round_up(N, kMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
sorted_cache_sets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
cache_set_sorted_unique_indices
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
time_stamp,
lru_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
stochastic_rounding,
rng_engine_inputs);
}));
C10_HIP_KERNEL_LAUNCH_CHECK();
}
void lru_cache_populate_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
// Get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, false);
// Find uncached indices
auto cache_sets_and_unique_indices = lru_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
time_stamp,
lru_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lru_cache_insert_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
time_stamp,
lru_state,
stochastic_rounding);
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lru_cache_insert_byte_kernel(
at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<uint8_t, 1, at::RestrictPtrTraits>
weights_tys,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_cache_sets,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<uint8_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
int64_t time_stamp,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits> lru_state) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 || sorted_cache_sets[n - 1] != sorted_cache_sets[n]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int32_t cache_set = sorted_cache_sets[n];
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique && sorted_cache_sets[n + SL] == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t slot_time = lru_state[cache_set][slot];
int64_t costs[1] = {slot_time};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lru_cost = costs[0];
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lru_cost = shfl_sync(sorted_lru_cost, l);
if (insert_current_lru_cost == time_stamp) {
return;
}
index_t insert_idx = cache_set_sorted_indices[n + l];
int32_t t_insert = cache_index_table_map[insert_idx];
SparseType weight_ty_insert =
static_cast<SparseType>(weights_tys[t_insert]);
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
const int32_t D_insert_bytes =
padded_row_size_in_bytes(D_insert, weight_ty_insert);
// ensure that threadIdx.x is the only thread reading/writing to
// lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
// not empty
if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) {
// evict from slot to backing storage
int32_t t_current = cache_index_table_map[current_idx];
SparseType weight_ty_current =
static_cast<SparseType>(weights_tys[t_current]);
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
const int32_t D_current_bytes =
padded_row_size_in_bytes(D_current, weight_ty_current);
auto row =
&weights[weights_offset_current + idx_current * D_current_bytes + 0];
auto cache_row =
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
// Evict the cache
for (int32_t d = threadIdx.x; d < D_current_bytes; d += blockDim.x) {
row[d] = cache_row[d]; // uint8_t access
}
}
auto row =
&weights[weights_offset_insert + idx_insert * D_insert_bytes + 0];
auto cache_row = &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
for (int32_t d = threadIdx.x; d < D_insert_bytes; d += blockDim.x) {
cache_row[d] = row[d];
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
lru_state[cache_set][insert_slot] = time_stamp;
}
}
}
void lru_cache_insert_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
AT_DISPATCH_INDEX_TYPES(
cache_set_sorted_unique_indices.scalar_type(),
"lru_cache_insert_byte_cuda",
[&]() {
hipLaunchKernelGGL(( lru_cache_insert_byte_kernel),
dim3(div_round_up(N, kMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
sorted_cache_sets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
cache_set_sorted_unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(),
time_stamp,
lru_state.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
void lru_cache_populate_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
// Get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, false);
// Find uncached indices
auto cache_sets_and_unique_indices = lru_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
time_stamp,
lru_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lru_cache_insert_byte_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
weights_tys,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
time_stamp,
lru_state);
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lfu_update_counts_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
unique_indices,
const int32_t* __restrict__ N_unique,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
unique_indices_count,
at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> lfu_state) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= *N_unique) {
return;
}
auto idx = unique_indices[n];
lfu_state[idx] += unique_indices_count[n];
}
void lfu_update_counts_cuda(
Tensor unique_indices,
Tensor unique_indices_length,
Tensor unique_indices_count,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(unique_indices_count);
TENSOR_ON_CUDA_GPU(lfu_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(unique_indices.get_device());
int32_t N = unique_indices.size(0);
AT_DISPATCH_INDEX_TYPES(
unique_indices.scalar_type(), "lfu_update_counts_cuda", [&]() {
hipLaunchKernelGGL(( lfu_update_counts_kernel),
dim3(div_round_up(N, kMaxThreads)),
dim3(kMaxThreads),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
unique_indices_count
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
lfu_state.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>());
});
C10_HIP_KERNEL_LAUNCH_CHECK();
}
constexpr int32_t kCacheSetBits = 24;
constexpr int32_t kLFUCounterBits = 40;
static_assert(kCacheSetBits + kLFUCounterBits == 8 * sizeof(int64_t), "");
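// Illustrative note (added commentary, not part of the original logic): the
// two constants above define how lfu_cache_find_uncached_kernel packs its
// radix-sort key -- the destination cache set in the high 24 bits and an
// *inverted* LFU count in the low 40 bits:
//
//   uint64_t key = (uint64_t(cache_set) << kLFUCounterBits) |
//                  ((uint64_t(1) << kLFUCounterBits) - 1 - lfu_count);
//
// e.g. cache_set = 5, lfu_count = 3 gives key = (5 << 40) | (2^40 - 4). A
// hotter row (larger lfu_count) produces a smaller low-order field and thus
// sorts earlier inside its set's segment, which is exactly the "highest LFUs
// come first" ordering the insert kernel relies on. This assumes
// lfu_state[idx] <= 2^40 - 1 and cache_set fits in 24 bits.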
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lfu_cache_find_uncached_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
unique_indices,
const int32_t* __restrict__ N_unique,
int64_t max_indices,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
uint64_t* __restrict__ cache_sets,
const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
lfu_state) {
int32_t N = unique_indices.size(0);
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= N) {
return;
}
if (n >= *N_unique) {
if (threadIdx.x == 0) {
cache_sets[n] =
(static_cast<uint64_t>(C)
<< kLFUCounterBits); // invalid index, used as sentinel
}
return;
}
int64_t idx = unique_indices[n];
if (idx == max_indices) {
if (threadIdx.x == 0) {
cache_sets[n] =
(static_cast<uint64_t>(C)
<< kLFUCounterBits); // invalid index, used as sentinel
}
return;
}
uint32_t cache_set = cache_slot(idx, C);
auto slot = threadIdx.x;
bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx;
if (found) {
// mark it as existing.
cache_sets[n] =
(static_cast<uint64_t>(C)
<< kLFUCounterBits); // invalid index, used as sentinel
}
#ifdef __HIP_PLATFORM_HCC__
// FIXME: __any_sync with mask isn't supported by HIP yet.
// See https://fburl.com/fvy7j0lq for the similar context.
// assert false here with https://fburl.com/pfm7enw2
assert(false);
if (!__any(found)) {
#else
if (!__any_sync(0xFFFFFFFF, found)) {
#endif
if (threadIdx.x == 0) {
// sort so the highest LFUs come first in the segment.
      // assume lfu_state[idx] <= 2^40 - 1 and cache_set < 2^24 - 1
cache_sets[n] = ((static_cast<uint64_t>(cache_set) << kLFUCounterBits)) |
((static_cast<uint64_t>(1) << kLFUCounterBits) - 1 - lfu_state[idx]);
}
}
}
std::pair<Tensor, Tensor> lfu_cache_find_uncached_cuda(
Tensor unique_indices,
Tensor unique_indices_length,
int64_t max_indices,
Tensor lxu_cache_state,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lfu_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(unique_indices.get_device());
auto cache_sets =
empty_like(unique_indices, unique_indices.options().dtype(at::kLong));
int32_t N = unique_indices.numel();
auto sorted_cache_sets = empty_like(cache_sets);
auto cache_set_sorted_unique_indices = empty_like(unique_indices);
AT_DISPATCH_INDEX_TYPES(
unique_indices.scalar_type(), "lfu_cache_find_uncached_cuda", [&]() {
// Find uncached indices
hipLaunchKernelGGL(( lfu_cache_find_uncached_kernel),
dim3(div_round_up(N, kMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
max_indices,
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
(uint64_t*)cache_sets.data_ptr<int64_t>(),
lfu_state.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
// Sort the cache sets and ids
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
(uint64_t*)cache_sets.data_ptr<int64_t>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
unique_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX hipcub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
(uint64_t*)cache_sets.data_ptr<int64_t>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
});
return {sorted_cache_sets, cache_set_sorted_unique_indices};
}
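// Added note (an assumption about intent, not an authoritative statement): the
// end-bit argument passed to SortPairs above,
// int(log2(float(C + 1)) + 1) + kLFUCounterBits, limits the radix sort to the
// bits that can actually be set in the packed key: the low 40 bits holding the
// inverted LFU count plus just enough high bits to represent set ids up to the
// sentinel value C. With C = 1024 sets, for instance, only 40 + 11 = 51 of the
// 64 key bits are sorted, which saves radix passes over sorting the full word.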
template <typename emb_t, typename cache_t>
__global__ __launch_bounds__(kCacheMaxThreads) void lfu_cache_insert_kernel(
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const uint64_t* __restrict__ sorted_cache_sets,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
lfu_state,
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 ||
(sorted_cache_sets[n - 1] >> kLFUCounterBits) !=
(sorted_cache_sets[n] >> kLFUCounterBits));
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
uint32_t cache_set = (sorted_cache_sets[n] >> kLFUCounterBits);
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique &&
(sorted_cache_sets[n + SL] >> kLFUCounterBits) == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t current_idx = lxu_cache_state[cache_set][slot];
int64_t current_lfu_cost =
(current_idx != static_cast<int64_t>(kCacheStateInvalid))
? lfu_state[current_idx]
: -1;
int64_t costs[1] = {current_lfu_cost};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lfu_cost = costs[0];
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lfu_cost = shfl_sync(sorted_lfu_cost, l);
int64_t insert_idx = cache_set_sorted_indices[n + l];
int64_t insert_lfu_cost = lfu_state[insert_idx];
if (insert_current_lfu_cost > insert_lfu_cost) {
// don't insert.
// all subsequent `current_lfu_cost` values are greater, and all
// subsequent `insert_lfu_cost` values are smaller, so we can exit
// early here.
return;
}
int32_t t_insert = cache_index_table_map[insert_idx];
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
// not empty
if (insert_current_lfu_cost != -1) {
// ensure that threadIdx.x is the only thread reading/writing to
// lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
int32_t t_current = cache_index_table_map[current_idx];
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
int32_t D_emb = D_current;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_current + idx_current * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_current,
nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
(blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x) *
kWarpSize +
l,
&state);
weight_row.set_stoc_state(&state);
}
float2 qparams;
at::acc_type<cache_t, true> local_min =
std::numeric_limits<at::acc_type<cache_t, true>>::max();
at::acc_type<cache_t, true> local_max =
std::numeric_limits<at::acc_type<cache_t, true>>::lowest();
if (std::is_same<emb_t, uint8_t>::value) {
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec =
weight_row.load(d * 4, qparams); // qparams not used
local_max = max(local_max, vec4_max(cache_weights_vec));
local_min = min(local_min, vec4_min(cache_weights_vec));
}
qparams = warp_find_qparams(local_min, local_max);
if (threadIdx.x == 0) {
weight_row.store_qparams(qparams);
}
}
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams);
weight_row.evict(cache_weights_vec, d * 4, qparams);
}
}
// insert into cache
int32_t D_emb = D_insert;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_insert,
nullptr);
auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
nullptr,
D_insert,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row_emb.load_qparams();
}
for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) {
auto row = weight_row_emb.load(d * 4, qparams);
weight_row_cache.store(row, d * 4, qparams);
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
}
}
}
void lfu_cache_insert_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
DISPATCH_EMB_CACHE_TYPES(
weights.type(),
lxu_cache_weights.type(),
"lfu_cache_insert_kernel_2",
([&] {
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
hipLaunchKernelGGL(( lfu_cache_insert_kernel<emb_t, cache_t>)
, dim3(div_round_up(N, kCacheMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kCacheMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
cache_set_sorted_unique_indices
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
lfu_state
.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(),
stochastic_rounding,
rng_engine_inputs);
}));
C10_HIP_KERNEL_LAUNCH_CHECK();
}
void lfu_cache_populate_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
  // get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, true);
// update lfu counts
lfu_update_counts_cuda(
unique_indices, unique_indices_length, *unique_indices_count, lfu_state);
// find uncached indices
auto cache_sets_and_unique_indices = lfu_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
lfu_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lfu_cache_insert_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
lfu_state,
stochastic_rounding);
}
// In `lfu_cache_insert_kernel`, we use `emb_t` and `cache_t` for the
// high-precision cache implementation, where we can have {FP32, FP16, INT8}
// for embedding precision (data types), and {FP32, FP16} for cache precision
// (data types).
//
// In `lfu_cache_insert_byte_kernel`, we only use uint8_t for the both embedding
// and cache data type (conforming to the inference TBE kernel logics).
// - We pass in `weights_tys` to denote the real data types for the embeddings:
// {FP32, FP16, INT8, INT4}. For example, FP32 is 4 byte element in the byte
// tensor, and INT4 is half byte element in the byte tensor.
// - We only assume that the embedding and cache have the same precisions (the
// real "precision" is determined by `weights_tys` although the data types are
// uint8_t only). Basically no "high-precision cache" support for now.
// - The insert/evict of embedding row from the cache are done in a byte-by-byte
// manner.
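// Added illustration (hedged; the numbers simply follow the
// padded_row_size_in_bytes convention used by this file): for an embedding
// dimension of 128 the byte rows handled below occupy
//   FP32: 128 * 4           = 512 bytes (already a multiple of 16)
//   FP16: 128 * 2           = 256 bytes
//   INT8: 128 + 4 (qparams) = 132 -> padded to 144 bytes
//   INT4: 128 / 2 + 4       =  68 -> padded to  80 bytes
// so the insert/evict loops can stride over D_*_bytes one uint8_t at a time,
// regardless of the logical element type.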
template <typename index_t>
__global__
__launch_bounds__(kCacheMaxThreads) void lfu_cache_insert_byte_kernel(
at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<uint8_t, 1, at::RestrictPtrTraits>
weights_tys,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const uint64_t* __restrict__ sorted_cache_sets,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<uint8_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
lfu_state) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 ||
(sorted_cache_sets[n - 1] >> kLFUCounterBits) !=
(sorted_cache_sets[n] >> kLFUCounterBits));
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
uint32_t cache_set = (sorted_cache_sets[n] >> kLFUCounterBits);
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique &&
(sorted_cache_sets[n + SL] >> kLFUCounterBits) == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t current_idx = lxu_cache_state[cache_set][slot];
int64_t current_lfu_cost =
(current_idx != static_cast<int64_t>(kCacheStateInvalid))
? lfu_state[current_idx]
: -1;
int64_t costs[1] = {current_lfu_cost};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lfu_cost = costs[0];
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lfu_cost = shfl_sync(sorted_lfu_cost, l);
index_t insert_idx = cache_set_sorted_indices[n + l];
int64_t insert_lfu_cost = lfu_state[insert_idx];
if (insert_current_lfu_cost > insert_lfu_cost) {
// don't insert.
// all subsequent `current_lfu_cost` values are greater, and all
// subsequent `insert_lfu_cost` values are smaller, so we can exit
// early here.
return;
}
int32_t t_insert = cache_index_table_map[insert_idx];
SparseType weight_ty_insert =
static_cast<SparseType>(weights_tys[t_insert]);
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
const int32_t D_insert_bytes =
padded_row_size_in_bytes(D_insert, weight_ty_insert);
// not empty
if (insert_current_lfu_cost != -1) {
// ensure that threadIdx.x is the only thread reading/writing to
// lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
int32_t t_current = cache_index_table_map[current_idx];
SparseType weight_ty_current =
static_cast<SparseType>(weights_tys[t_current]);
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
const int32_t D_current_bytes =
padded_row_size_in_bytes(D_current, weight_ty_current);
auto row =
&weights[weights_offset_current + idx_current * D_current_bytes + 0];
auto cache_row =
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
// Evict the cache
for (int32_t d = threadIdx.x; d < D_current_bytes; d += blockDim.x) {
row[d] = cache_row[d]; // uint8_t access
}
}
// insert into cache
auto row =
&weights[weights_offset_insert + idx_insert * D_insert_bytes + 0];
auto cache_row = &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
for (int32_t d = threadIdx.x; d < D_insert_bytes; d += blockDim.x) {
cache_row[d] = row[d];
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
}
}
}
void lfu_cache_insert_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
  TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
AT_DISPATCH_INDEX_TYPES(
cache_set_sorted_unique_indices.scalar_type(),
"lfu_cache_insert_byte_cuda",
[&]() {
hipLaunchKernelGGL(( lfu_cache_insert_byte_kernel),
dim3(div_round_up(N, kCacheMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kCacheMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
cache_set_sorted_unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(),
lfu_state.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>());
});
C10_HIP_KERNEL_LAUNCH_CHECK();
}
void lfu_cache_populate_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
  TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
  // get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, true);
// update lfu counts
lfu_update_counts_cuda(
unique_indices, unique_indices_length, *unique_indices_count, lfu_state);
// find uncached indices
auto cache_sets_and_unique_indices = lfu_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
lfu_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lfu_cache_insert_byte_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
weights_tys,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
lfu_state);
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lxu_cache_lookup_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
linear_cache_indices,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
lxu_cache_locations) {
const int32_t C = lxu_cache_state.size(0);
const int32_t N = linear_cache_indices.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= N) {
return;
}
int64_t idx = linear_cache_indices[n];
int32_t cache_set = cache_slot(idx, C);
auto slot = threadIdx.x;
bool found = (__ldg((&lxu_cache_state[cache_set][0]) + slot) == idx);
if (found) {
lxu_cache_locations[n] = cache_set * kWarpSize + slot;
}
#ifdef __HIP_PLATFORM_HCC__
// FIXME: __any_sync with mask isn't supported by HIP yet.
// See https://fburl.com/fvy7j0lq for the similar context.
// assert false here with https://fburl.com/pfm7enw2
assert(false);
if (!__any(found)) {
#else
if (!__any_sync(0xFFFFFFFF, found)) {
#endif
if (threadIdx.x == 0) {
lxu_cache_locations[n] = kCacheLocationMissing;
}
}
}
Tensor lxu_cache_lookup_cuda(
Tensor linear_cache_indices,
Tensor lxu_cache_state) {
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(linear_cache_indices.get_device());
const auto N = linear_cache_indices.numel();
auto lxu_cache_locations = empty_like(
linear_cache_indices, linear_cache_indices.options().dtype(at::kInt));
if (linear_cache_indices.numel() == 0) {
// nothing to do
return lxu_cache_locations;
}
const dim3 threads(kWarpSize, kMaxThreads / kWarpSize);
const dim3 blocks(div_round_up(N, kMaxThreads / kWarpSize));
AT_DISPATCH_INDEX_TYPES(
linear_cache_indices.scalar_type(), "lxu_cache_lookup_cuda", [&]() {
hipLaunchKernelGGL(( lxu_cache_lookup_kernel),
dim3(blocks),
dim3(threads),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
linear_cache_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_locations
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return lxu_cache_locations;
}
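// Added usage sketch (illustrative only, not taken from an actual call site;
// variable names here are hypothetical): a typical forward step linearizes the
// batch indices, refreshes the cache, then resolves per-row cache locations:
//
//   auto linear = linearize_cache_indices_cuda(
//       cache_hash_size_cumsum, indices, offsets);
//   lru_cache_populate_cuda(
//       weights, cache_hash_size_cumsum, total_cache_hash_size,
//       cache_index_table_map, weights_offsets, D_offsets, linear,
//       lxu_cache_state, lxu_cache_weights, time_stamp, lru_state,
//       /*stochastic_rounding=*/true);
//   auto locations = lxu_cache_lookup_cuda(linear, lxu_cache_state);
//
// Rows whose location equals kCacheLocationMissing are then read from the
// backing (UVM) weights instead of lxu_cache_weights.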
| 284ed1c9ae0e8b54a3fb8dfdbf9e567b56650821.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// clang-format off
#include "fbgemm_gpu/cub_namespace_prefix.cuh"
#include "cub/device/device_radix_sort.cuh"
#include "cub/device/device_run_length_encode.cuh"
#include "cub/device/device_select.cuh"
#include "fbgemm_gpu/cub_namespace_postfix.cuh"
// clang-format on
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <c10/cuda/CUDAGuard.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <limits>
#include <mutex>
#include "fbgemm_gpu/dispatch_macros.h"
#include "fbgemm_gpu/embedding_common.h"
#include "fbgemm_gpu/fbgemm_cuda_utils.cuh"
#include "fbgemm_gpu/sparse_ops_utils.h"
#include "fbgemm_gpu/split_embeddings_utils.cuh"
constexpr size_t kCacheMaxThreads = 512;
using Tensor = at::Tensor;
using namespace fbgemm_gpu;
namespace {
__forceinline__ __host__ __device__ uint32_t round_up(uint32_t a, uint32_t b) {
return ((a + b - 1) / b) * b;
}
__host__ __device__ inline int32_t unpadded_row_size_in_bytes(
int32_t dim,
SparseType weight_ty) {
if (weight_ty == SparseType::FP32) {
return dim * 4;
}
if (weight_ty == SparseType::FP16) {
return dim * 2;
}
if (weight_ty == SparseType::INT8) {
return dim + 4;
}
if (weight_ty == SparseType::INT4) {
return dim / 2 + 4;
}
if (weight_ty == SparseType::INT2) {
return dim / 4 + 4;
}
return 0;
}
__host__ __device__ inline int32_t padded_row_size_in_bytes(
int32_t dim,
SparseType weight_ty) {
auto r = unpadded_row_size_in_bytes(dim, weight_ty);
return round_up(r, 16);
}
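// Added worked example (commentary only): with dim = 128,
//   unpadded: FP32 -> 512, FP16 -> 256, INT8 -> 132, INT4 -> 68, INT2 -> 36
//   padded:   FP32 -> 512, FP16 -> 256, INT8 -> 144, INT4 -> 80, INT2 -> 48
// The round-up to 16 presumably keeps every row 16-byte aligned so byte-wise
// cache copies and vectorized accesses never straddle a row boundary.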
} // namespace
// // TODO: do we care about 64-bit indices? Currently we just ignore.
// __host__ DEVICE_INLINE uint32_t cache_slot(int32_t h_in, int32_t C) {
// //   MurmurHash3 32-bit mixing function.
// uint32_t h = (uint32_t)h_in;
// h ^= h >> 16;
// h *= 0x85ebca6b;
// h ^= h >> 13;
// h *= 0xc2b2ae35;
// h ^= h >> 16;
// //
// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
// return ((uint64_t)h * (uint64_t)C) >> 32;
// }
__host__ DEVICE_INLINE uint32_t cache_slot(int64_t h_in, int32_t C) {
// MurmurHash3 64-bit mixing function.
uint64_t h = (uint64_t)h_in;
h ^= h >> 33;
h *= 0xff51afd7ed558ccd;
h ^= h >> 33;
h *= 0xc4ceb9fe1a85ec53;
h ^= h >> 33;
return h % (uint32_t)C;
}
int64_t host_lxu_cache_slot(int64_t h_in, int64_t C) {
return static_cast<int64_t>(cache_slot(h_in, static_cast<int32_t>(C)));
}
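// Added sketch (illustrative): host_lxu_cache_slot exposes the same
// set-assignment hash the kernels use, e.g.
//   int64_t set = host_lxu_cache_slot(/*h_in=*/linear_index, /*C=*/num_sets);
// maps a linearized cache index to one of C sets; the kWarpSize ways within
// that set are then searched associatively by the lookup/insert kernels.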
constexpr int32_t kCacheLocationMissing = -1;
constexpr int64_t kCacheStateInvalid = -1;
template <typename emb_t, typename cache_t>
__global__ __launch_bounds__(kMaxThreads) void lxu_cache_flush_kernel(
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args) {
int32_t B = lxu_cache_weights.size(0);
int32_t b = blockIdx.x * blockDim.y + threadIdx.y;
if (b >= B) {
return;
}
int32_t slot = b % kWarpSize;
int32_t cache_set = b / kWarpSize;
int64_t current_idx = lxu_cache_state[cache_set][slot];
if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) {
// evict from slot to backing storage
int32_t t_current = cache_index_table_map[current_idx];
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
int32_t D_emb = D_current;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(
&weights[weights_offset_current + idx_current * D_emb + 0],
&lxu_cache_weights[b][0],
D_current,
nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x,
&state);
weight_row.set_stoc_state(&state);
}
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams =
thrust_find_qparams<cache_t>(&lxu_cache_weights[b][0], D_current);
if (threadIdx.x == 0) {
weight_row.store_qparams(qparams);
}
}
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<at::acc_type<cache_t, true>> cache_weights_vec =
weight_row.load(d * 4, qparams);
weight_row.evict(cache_weights_vec, d * 4, qparams);
}
}
}
void lxu_cache_flush_cuda(
Tensor uvm_weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
int64_t total_D,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(uvm_weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(lxu_cache_weights.get_device());
int32_t T = D_offsets.numel() - 1;
int32_t S = lxu_cache_weights.size(0);
int32_t tx = std::min<int32_t>(total_D / 4 / T, kMaxThreads);
dim3 threads(tx, kMaxThreads / tx);
dim3 blocks(div_round_up(S, kMaxThreads / tx));
DISPATCH_EMB_CACHE_TYPES(
uvm_weights.type(),
lxu_cache_weights.type(),
"lxu_cache_flush_kernel_2",
([&] {
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && std::is_same<emb_t, at::Half>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
lxu_cache_flush_kernel<emb_t, cache_t>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
uvm_weights
.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
stochastic_rounding,
rng_engine_inputs);
}));
C10_CUDA_KERNEL_LAUNCH_CHECK();
return;
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void linearize_cache_indices_kernel(
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets,
at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
linear_cache_indices) {
int32_t T = cache_hash_size_cumsum.size(0) - 1;
int64_t total_cache_hash_size = cache_hash_size_cumsum[T];
int32_t B = (offsets.size(0) - 1) / T;
int32_t b_t = blockIdx.x * blockDim.x + threadIdx.x;
int32_t b = b_t % B;
int32_t t = b_t / B;
bool valid = t < T;
int64_t hash_offset = valid ? cache_hash_size_cumsum[t] : -1;
auto indices_start = valid ? offsets[t * B + b] : -1;
int32_t L = valid ? offsets[t * B + b + 1] - indices_start : 0;
int32_t lane_id = threadIdx.x % kWarpSize;
// hash_offset < 0 for non-caching tables
for (int32_t j = 0; j < kWarpSize; ++j) {
auto indices_start_warp = shfl_sync(indices_start, j);
int32_t L_warp = shfl_sync(L, j);
int64_t hash_offset_warp = shfl_sync(hash_offset, j);
if (hash_offset_warp >= 0) {
for (int32_t i = lane_id; i < L_warp; i += kWarpSize) {
auto idx = __ldg(&indices[indices_start_warp + i]);
linear_cache_indices[indices_start_warp + i] = hash_offset_warp + idx;
}
} else {
for (int32_t i = lane_id; i < L_warp; i += kWarpSize) {
linear_cache_indices[indices_start_warp + i] = total_cache_hash_size;
}
}
}
}
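// Added example (commentary only): an index idx belonging to table t is mapped
// to
//   linear_cache_indices[i] = cache_hash_size_cumsum[t] + idx
// so ids from different tables never collide in the shared cache, while rows
// of tables that do not participate in caching (hash_offset < 0) are mapped to
// total_cache_hash_size, a value the downstream kernels treat as "not
// cacheable".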
Tensor linearize_cache_indices_cuda(
Tensor cache_hash_size_cumsum,
Tensor indices,
Tensor offsets) {
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(indices);
TENSOR_ON_CUDA_GPU(offsets);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(cache_hash_size_cumsum.get_device());
auto T = cache_hash_size_cumsum.size(0) - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B >= 0);
auto linear_cache_indices = at::empty_like(indices);
if (B == 0) {
return linear_cache_indices;
}
AT_DISPATCH_INDEX_TYPES(
indices.scalar_type(), "linearize_cache_indices_kernel", [&]() {
linearize_cache_indices_kernel<<<
div_round_up(B * T, kMaxThreads),
kMaxThreads,
0,
at::cuda::getCurrentCUDAStream()>>>(
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
linear_cache_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return linear_cache_indices;
}
std::tuple<Tensor, Tensor, c10::optional<Tensor>> get_unique_indices_cuda(
Tensor linear_indices,
int64_t max_indices,
bool compute_count) {
TENSOR_ON_CUDA_GPU(linear_indices);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(linear_indices.get_device());
TORCH_CHECK(linear_indices.numel() < std::numeric_limits<int32_t>::max());
int32_t N = linear_indices.numel();
auto sorted_indices = at::empty_like(linear_indices);
auto unique_indices = at::empty_like(linear_indices);
auto unique_indices_length =
at::empty({1}, linear_indices.options().dtype(at::kInt));
c10::optional<Tensor> unique_indices_count = c10::nullopt;
if (compute_count) {
unique_indices_count = at::empty(
{linear_indices.numel()}, linear_indices.options().dtype(at::kInt));
}
AT_DISPATCH_INDEX_TYPES(
linear_indices.scalar_type(), "get_unique_indices_cuda", [&]() {
// sort indices
size_t temp_storage_bytes_0 = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRadixSort::SortKeys(
nullptr,
temp_storage_bytes_0,
linear_indices.data_ptr<index_t>(),
sorted_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(max_indices + 1)) + 1),
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage_0 = at::empty(
{static_cast<index_t>(temp_storage_bytes_0)},
linear_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRadixSort::SortKeys(
temp_storage_0.data_ptr(),
temp_storage_bytes_0,
linear_indices.data_ptr<index_t>(),
sorted_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(max_indices + 1)) + 1),
at::cuda::getCurrentCUDAStream(),
false));
// get unique indices
if (compute_count) {
size_t temp_storage_bytes_1 = 0;
AT_CUDA_CHECK(
FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRunLengthEncode::Encode(
nullptr,
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_count->data_ptr<int32_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage_1 = at::empty(
{static_cast<index_t>(temp_storage_bytes_1)},
linear_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(
FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRunLengthEncode::Encode(
temp_storage_1.data_ptr(),
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_count->data_ptr<int32_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::cuda::getCurrentCUDAStream(),
false));
} else {
size_t temp_storage_bytes_1 = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceSelect::Unique(
nullptr,
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage_1 = at::empty(
{static_cast<index_t>(temp_storage_bytes_1)},
linear_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceSelect::Unique(
temp_storage_1.data_ptr(),
temp_storage_bytes_1,
sorted_indices.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
unique_indices_length.data_ptr<int32_t>(),
N,
at::cuda::getCurrentCUDAStream(),
false));
}
});
return std::make_tuple(
unique_indices, unique_indices_length, unique_indices_count);
}
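// Added note (an assumption about intent): each CUB call above follows the
// usual two-phase pattern -- the first invocation with a nullptr workspace only
// computes temp_storage_bytes, a scratch byte tensor of that size is allocated
// through ATen (so it goes through the CUDA caching allocator), and the second
// invocation performs the actual sort / unique / run-length encode. Note that
// unique_indices_length is a one-element int32 tensor that stays on the GPU;
// downstream kernels read it through the raw N_unique pointer instead of
// synchronizing to copy the count back to the host.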
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lru_cache_find_uncached_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
unique_indices,
const int32_t* __restrict__ N_unique,
int64_t max_indices,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> cache_sets,
int64_t time_stamp,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits> lru_state) {
int32_t N = unique_indices.size(0);
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= N) {
return;
}
if (n >= *N_unique) {
if (threadIdx.x == 0) {
cache_sets[n] = C; // invalid index, used as sentinel
}
return;
}
int64_t idx = unique_indices[n];
if (idx == max_indices) {
if (threadIdx.x == 0) {
cache_sets[n] = C; // invalid index, used as sentinel
}
return;
}
int32_t cache_set = cache_slot(idx, C);
auto slot = threadIdx.x;
bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx;
if (found) {
// mark it as existing.
cache_sets[n] = C; // invalid index, used as sentinel
// mark it as recently accessed so we don't evict.
lru_state[cache_set][slot] = time_stamp;
}
#ifdef __HIP_PLATFORM_HCC__
// FIXME: __any_sync with mask isn't supported by HIP yet.
// See https://fburl.com/fvy7j0lq for the similar context.
// assert false here with https://fburl.com/pfm7enw2
assert(false);
if (!__any(found)) {
#else
if (!__any_sync(0xFFFFFFFF, found)) {
#endif
if (threadIdx.x == 0) {
cache_sets[n] = cache_set;
}
}
}
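// Added illustration (not part of the original source): after this kernel,
// cache_sets[n] is either the set an uncached row should be inserted into, or
// C as a "nothing to do" sentinel (row already cached, padding past *N_unique,
// or the max_indices sentinel itself). Sorting (cache_sets, unique_indices) as
// key/value pairs below therefore groups all misses destined for the same set
// into one contiguous segment and pushes every sentinel entry to the end,
// which is what lets lru_cache_insert_kernel hand a whole segment to a single
// warp.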
std::pair<Tensor, Tensor> lru_cache_find_uncached_cuda(
Tensor unique_indices,
Tensor unique_indices_length,
int64_t max_indices,
Tensor lxu_cache_state,
int64_t time_stamp,
Tensor lru_state) {
TENSOR_ON_CUDA_GPU(unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lru_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(unique_indices.get_device());
auto cache_sets =
empty_like(unique_indices, unique_indices.options().dtype(at::kInt));
int32_t N = unique_indices.numel();
auto sorted_cache_sets = empty_like(cache_sets);
auto cache_set_sorted_unique_indices = empty_like(unique_indices);
AT_DISPATCH_INDEX_TYPES(
unique_indices.scalar_type(), "lru_cache_find_uncached_cuda", [&]() {
// Find uncached indices
lru_cache_find_uncached_kernel<<<
div_round_up(N, kMaxThreads / kWarpSize),
dim3(kWarpSize, kMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
max_indices,
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
cache_sets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
time_stamp,
lru_state.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Sort the cache sets and ids
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
cache_sets.data_ptr<int32_t>(),
sorted_cache_sets.data_ptr<int32_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1),
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<index_t>(temp_storage_bytes)},
unique_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
cache_sets.data_ptr<int32_t>(),
sorted_cache_sets.data_ptr<int32_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1),
at::cuda::getCurrentCUDAStream(),
false));
});
return {sorted_cache_sets, cache_set_sorted_unique_indices};
}
template <typename emb_t, typename cache_t>
__global__ __launch_bounds__(kMaxThreads) void lru_cache_insert_kernel(
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_cache_sets,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
int64_t time_stamp,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits> lru_state,
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 || sorted_cache_sets[n - 1] != sorted_cache_sets[n]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int32_t cache_set = sorted_cache_sets[n];
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique && sorted_cache_sets[n + SL] == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t slot_time = lru_state[cache_set][slot];
int64_t costs[1] = {slot_time};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lru_cost = costs[0];
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lru_cost = shfl_sync(sorted_lru_cost, l);
if (insert_current_lru_cost == time_stamp) {
return;
}
int64_t insert_idx = cache_set_sorted_indices[n + l];
int32_t t_insert = cache_index_table_map[insert_idx];
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
// ensure that threadIdx.x is the only thread reading/writing to
// lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
// not empty
if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) {
// evict from slot to backing storage
int32_t t_current = cache_index_table_map[current_idx];
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
int32_t D_emb = D_current;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_current + idx_current * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_current,
nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
(blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x) *
kWarpSize +
l,
&state);
weight_row.set_stoc_state(&state);
}
float2 qparams;
at::acc_type<cache_t, true> local_min =
std::numeric_limits<at::acc_type<cache_t, true>>::max();
at::acc_type<cache_t, true> local_max =
std::numeric_limits<at::acc_type<cache_t, true>>::lowest();
if (std::is_same<emb_t, uint8_t>::value) {
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec =
weight_row.load(d * 4, qparams); // qparams not used
local_max = max(local_max, vec4_max(cache_weights_vec));
local_min = min(local_min, vec4_min(cache_weights_vec));
}
qparams = warp_find_qparams(local_min, local_max);
if (threadIdx.x == 0) {
weight_row.store_qparams(qparams);
}
}
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams);
weight_row.evict(
cache_weights_vec, d * 4, qparams); // FP32 -> FP16/FP32
}
}
int32_t D_emb = D_insert;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
// insert into cache
auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_insert,
nullptr);
auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
nullptr,
D_insert,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row_emb.load_qparams();
}
for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) {
auto row = weight_row_emb.load(d * 4, qparams);
weight_row_cache.store(row, d * 4, qparams);
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
lru_state[cache_set][insert_slot] = time_stamp;
}
}
}
void lru_cache_insert_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
DISPATCH_EMB_CACHE_TYPES(
weights.type(),
lxu_cache_weights.type(),
"lru_cache_insert_kernel_2",
([&] {
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
lru_cache_insert_kernel<emb_t, cache_t>
<<<div_round_up(N, kMaxThreads / kWarpSize),
dim3(kWarpSize, kMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
sorted_cache_sets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
cache_set_sorted_unique_indices
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
time_stamp,
lru_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
stochastic_rounding,
rng_engine_inputs);
}));
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
void lru_cache_populate_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
  // Get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, false);
// Find uncached indices
auto cache_sets_and_unique_indices = lru_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
time_stamp,
lru_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lru_cache_insert_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
time_stamp,
lru_state,
stochastic_rounding);
}
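// Added pipeline summary (hedged commentary): one LRU populate step above is
//   1. get_unique_indices_cuda      -- dedupe the linearized ids of this batch
//   2. lru_cache_find_uncached_cuda -- probe lxu_cache_state, time-stamp hits,
//                                      and emit (cache_set, id) pairs for the
//                                      misses
//   3. lru_cache_insert_cuda        -- per set: evict the least recently used
//                                      slots back to `weights`, then copy the
//                                      missing rows into lxu_cache_weights
// time_stamp is expected to increase across iterations so that rows touched in
// the current step are never evicted by that same step.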
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lru_cache_insert_byte_kernel(
at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<uint8_t, 1, at::RestrictPtrTraits>
weights_tys,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_cache_sets,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<uint8_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
int64_t time_stamp,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits> lru_state) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 || sorted_cache_sets[n - 1] != sorted_cache_sets[n]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int32_t cache_set = sorted_cache_sets[n];
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique && sorted_cache_sets[n + SL] == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t slot_time = lru_state[cache_set][slot];
int64_t costs[1] = {slot_time};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lru_cost = costs[0];
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lru_cost = shfl_sync(sorted_lru_cost, l);
if (insert_current_lru_cost == time_stamp) {
return;
}
index_t insert_idx = cache_set_sorted_indices[n + l];
int32_t t_insert = cache_index_table_map[insert_idx];
SparseType weight_ty_insert =
static_cast<SparseType>(weights_tys[t_insert]);
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
const int32_t D_insert_bytes =
padded_row_size_in_bytes(D_insert, weight_ty_insert);
// ensure that threadIdx.x is the only thread reading/writing to
// lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
// not empty
if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) {
// evict from slot to backing storage
int32_t t_current = cache_index_table_map[current_idx];
SparseType weight_ty_current =
static_cast<SparseType>(weights_tys[t_current]);
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
const int32_t D_current_bytes =
padded_row_size_in_bytes(D_current, weight_ty_current);
auto row =
&weights[weights_offset_current + idx_current * D_current_bytes + 0];
auto cache_row =
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
// Evict the cache
for (int32_t d = threadIdx.x; d < D_current_bytes; d += blockDim.x) {
row[d] = cache_row[d]; // uint8_t access
}
}
auto row =
&weights[weights_offset_insert + idx_insert * D_insert_bytes + 0];
auto cache_row = &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
for (int32_t d = threadIdx.x; d < D_insert_bytes; d += blockDim.x) {
cache_row[d] = row[d];
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
lru_state[cache_set][insert_slot] = time_stamp;
}
}
}
void lru_cache_insert_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
AT_DISPATCH_INDEX_TYPES(
cache_set_sorted_unique_indices.scalar_type(),
"lru_cache_insert_byte_cuda",
[&]() {
lru_cache_insert_byte_kernel<<<
div_round_up(N, kMaxThreads / kWarpSize),
dim3(kWarpSize, kMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
sorted_cache_sets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
cache_set_sorted_unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(),
time_stamp,
lru_state.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
void lru_cache_populate_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
int64_t time_stamp,
Tensor lru_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lru_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
  // Get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, false);
// Find uncached indices
auto cache_sets_and_unique_indices = lru_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
time_stamp,
lru_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lru_cache_insert_byte_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
weights_tys,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
time_stamp,
lru_state);
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lfu_update_counts_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
unique_indices,
const int32_t* __restrict__ N_unique,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
unique_indices_count,
at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> lfu_state) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= *N_unique) {
return;
}
auto idx = unique_indices[n];
lfu_state[idx] += unique_indices_count[n];
}
void lfu_update_counts_cuda(
Tensor unique_indices,
Tensor unique_indices_length,
Tensor unique_indices_count,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(unique_indices_count);
TENSOR_ON_CUDA_GPU(lfu_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(unique_indices.get_device());
int32_t N = unique_indices.size(0);
AT_DISPATCH_INDEX_TYPES(
unique_indices.scalar_type(), "lfu_update_counts_cuda", [&]() {
lfu_update_counts_kernel<<<
div_round_up(N, kMaxThreads),
kMaxThreads,
0,
at::cuda::getCurrentCUDAStream()>>>(
unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
unique_indices_count
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
lfu_state.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>());
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
constexpr int32_t kCacheSetBits = 24;
constexpr int32_t kLFUCounterBits = 40;
static_assert(kCacheSetBits + kLFUCounterBits == 8 * sizeof(int64_t), "");
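// Illustration only (not used by the kernels below): lfu_cache_find_uncached_kernel
// builds 64-bit sort keys with the cache set in the upper kCacheSetBits and the
// bit-complemented LFU count in the lower kLFUCounterBits, so that an ascending
// radix sort groups keys by cache set and, within a set, orders them from most-
// to least-frequently used. A minimal sketch of that layout (assuming
// lfu_count <= 2^kLFUCounterBits - 1, as the kernel comments do):
inline uint64_t pack_lfu_sort_key_sketch(uint32_t cache_set, uint64_t lfu_count) {
  const uint64_t counter_mask =
      (static_cast<uint64_t>(1) << kLFUCounterBits) - 1;
  // larger counts map to smaller key values, so they sort to the segment front
  return (static_cast<uint64_t>(cache_set) << kLFUCounterBits) |
      (counter_mask - lfu_count);
}
inline uint32_t unpack_cache_set_sketch(uint64_t sort_key) {
  return static_cast<uint32_t>(sort_key >> kLFUCounterBits);
}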
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lfu_cache_find_uncached_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
unique_indices,
const int32_t* __restrict__ N_unique,
int64_t max_indices,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
uint64_t* __restrict__ cache_sets,
const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
lfu_state) {
int32_t N = unique_indices.size(0);
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= N) {
return;
}
if (n >= *N_unique) {
if (threadIdx.x == 0) {
cache_sets[n] =
(static_cast<uint64_t>(C)
<< kLFUCounterBits); // invalid index, used as sentinel
}
return;
}
int64_t idx = unique_indices[n];
if (idx == max_indices) {
if (threadIdx.x == 0) {
cache_sets[n] =
(static_cast<uint64_t>(C)
<< kLFUCounterBits); // invalid index, used as sentinel
}
return;
}
uint32_t cache_set = cache_slot(idx, C);
auto slot = threadIdx.x;
bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx;
if (found) {
// mark it as existing.
cache_sets[n] =
(static_cast<uint64_t>(C)
<< kLFUCounterBits); // invalid index, used as sentinel
}
#ifdef __HIP_PLATFORM_HCC__
// FIXME: __any_sync with mask isn't supported by HIP yet.
// See https://fburl.com/fvy7j0lq for the similar context.
// assert false here with https://fburl.com/pfm7enw2
assert(false);
if (!__any(found)) {
#else
if (!__any_sync(0xFFFFFFFF, found)) {
#endif
if (threadIdx.x == 0) {
// sort so the highest LFUs come first in the segment.
// assume lfu_state[idx] <= 2^40 - 1 and cache_set < 2^24 -1
cache_sets[n] = ((static_cast<uint64_t>(cache_set) << kLFUCounterBits)) |
((static_cast<uint64_t>(1) << kLFUCounterBits) - 1 - lfu_state[idx]);
}
}
}
std::pair<Tensor, Tensor> lfu_cache_find_uncached_cuda(
Tensor unique_indices,
Tensor unique_indices_length,
int64_t max_indices,
Tensor lxu_cache_state,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lfu_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(unique_indices.get_device());
auto cache_sets =
empty_like(unique_indices, unique_indices.options().dtype(at::kLong));
int32_t N = unique_indices.numel();
auto sorted_cache_sets = empty_like(cache_sets);
auto cache_set_sorted_unique_indices = empty_like(unique_indices);
AT_DISPATCH_INDEX_TYPES(
unique_indices.scalar_type(), "lfu_cache_find_uncached_cuda", [&]() {
// Find uncached indices
lfu_cache_find_uncached_kernel<<<
div_round_up(N, kMaxThreads / kWarpSize),
dim3(kWarpSize, kMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
max_indices,
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
(uint64_t*)cache_sets.data_ptr<int64_t>(),
lfu_state.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Sort the cache sets and ids
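      // Only the key bits that can actually differ take part in the sort: the
      // low kLFUCounterBits hold the complemented LFU count and just enough
      // bits above them encode cache sets 0..C (C being the sentinel). E.g.
      // with C = 2^20 cache sets, only the low 61 of the 64 bits are sorted.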
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
(uint64_t*)cache_sets.data_ptr<int64_t>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
unique_indices.options().dtype(at::kByte));
AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
(uint64_t*)cache_sets.data_ptr<int64_t>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
unique_indices.data_ptr<index_t>(),
cache_set_sorted_unique_indices.data_ptr<index_t>(),
N,
0,
int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits,
at::cuda::getCurrentCUDAStream(),
false));
});
return {sorted_cache_sets, cache_set_sorted_unique_indices};
}
template <typename emb_t, typename cache_t>
__global__ __launch_bounds__(kCacheMaxThreads) void lfu_cache_insert_kernel(
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const uint64_t* __restrict__ sorted_cache_sets,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
lfu_state,
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 ||
(sorted_cache_sets[n - 1] >> kLFUCounterBits) !=
(sorted_cache_sets[n] >> kLFUCounterBits));
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
uint32_t cache_set = (sorted_cache_sets[n] >> kLFUCounterBits);
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique &&
(sorted_cache_sets[n + SL] >> kLFUCounterBits) == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t current_idx = lxu_cache_state[cache_set][slot];
int64_t current_lfu_cost =
(current_idx != static_cast<int64_t>(kCacheStateInvalid))
? lfu_state[current_idx]
: -1;
int64_t costs[1] = {current_lfu_cost};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lfu_cost = costs[0];
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lfu_cost = shfl_sync(sorted_lfu_cost, l);
int64_t insert_idx = cache_set_sorted_indices[n + l];
int64_t insert_lfu_cost = lfu_state[insert_idx];
if (insert_current_lfu_cost > insert_lfu_cost) {
// don't insert.
// all subsequent `current_lfu_cost` values are greater, and all
// subsequent `insert_lfu_cost` values are smaller, so we can exit
// early here.
return;
}
int32_t t_insert = cache_index_table_map[insert_idx];
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
// not empty
if (insert_current_lfu_cost != -1) {
      // ensure that only one thread (threadIdx.x == 0) reads/writes
      // lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
int32_t t_current = cache_index_table_map[current_idx];
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
int32_t D_emb = D_current;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_current + idx_current * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_current,
nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
(blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x) *
kWarpSize +
l,
&state);
weight_row.set_stoc_state(&state);
}
float2 qparams;
at::acc_type<cache_t, true> local_min =
std::numeric_limits<at::acc_type<cache_t, true>>::max();
at::acc_type<cache_t, true> local_max =
std::numeric_limits<at::acc_type<cache_t, true>>::lowest();
if (std::is_same<emb_t, uint8_t>::value) {
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec =
weight_row.load(d * 4, qparams); // qparams not used
local_max = max(local_max, vec4_max(cache_weights_vec));
local_min = min(local_min, vec4_min(cache_weights_vec));
}
qparams = warp_find_qparams(local_min, local_max);
if (threadIdx.x == 0) {
weight_row.store_qparams(qparams);
}
}
for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) {
Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams);
weight_row.evict(cache_weights_vec, d * 4, qparams);
}
}
// insert into cache
int32_t D_emb = D_insert;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0],
D_insert,
nullptr);
auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>(
&weights[weights_offset_insert + idx_insert * D_emb + 0],
nullptr,
D_insert,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row_emb.load_qparams();
}
for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) {
auto row = weight_row_emb.load(d * 4, qparams);
weight_row_cache.store(row, d * 4, qparams);
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
}
}
}
void lfu_cache_insert_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
DISPATCH_EMB_CACHE_TYPES(
weights.type(),
lxu_cache_weights.type(),
"lfu_cache_insert_kernel_2",
([&] {
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
lfu_cache_insert_kernel<emb_t, cache_t>
<<<div_round_up(N, kCacheMaxThreads / kWarpSize),
dim3(kWarpSize, kCacheMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
cache_set_sorted_unique_indices
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
lfu_state
.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(),
stochastic_rounding,
rng_engine_inputs);
}));
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
void lfu_cache_populate_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state,
bool stochastic_rounding) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
  // get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, true);
// update lfu counts
lfu_update_counts_cuda(
unique_indices, unique_indices_length, *unique_indices_count, lfu_state);
// find uncached indices
auto cache_sets_and_unique_indices = lfu_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
lfu_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lfu_cache_insert_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
lfu_state,
stochastic_rounding);
}
// In `lfu_cache_insert_kernel`, we use `emb_t` and `cache_t` for the
// high-precision cache implementation, where we can have {FP32, FP16, INT8}
// for embedding precision (data types), and {FP32, FP16} for cache precision
// (data types).
//
// In `lfu_cache_insert_byte_kernel`, we only use uint8_t for both the embedding
// and the cache data type (conforming to the inference TBE kernel logic).
// - We pass in `weights_tys` to denote the real data types for the embeddings:
// {FP32, FP16, INT8, INT4}. For example, FP32 is a 4-byte element in the byte
// tensor, and INT4 is a half-byte element in the byte tensor.
// - We assume that the embedding and cache have the same precision (the real
// "precision" is determined by `weights_tys`, although the data types are
// uint8_t only). Basically there is no "high-precision cache" support for now.
// - The insert/evict of an embedding row from the cache is done in a
// byte-by-byte manner.
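// Rough illustration of the element widths listed above (a sketch only, not
// used by the kernels; the enumerator spellings are assumed to match the
// SparseType enum used by the kernels in this file). The actual per-row byte
// count, including alignment padding and any quantization parameters, comes
// from padded_row_size_in_bytes(), which is defined elsewhere in fbgemm_gpu.
inline int32_t unpadded_row_size_bits_sketch(int32_t D, SparseType weight_ty) {
  switch (weight_ty) {
    case SparseType::FP32:
      return D * 32;
    case SparseType::FP16:
      return D * 16;
    case SparseType::INT8:
      return D * 8;
    case SparseType::INT4:
      return D * 4;
    default:
      return -1; // other types are not covered by this sketch
  }
}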
template <typename index_t>
__global__
__launch_bounds__(kCacheMaxThreads) void lfu_cache_insert_byte_kernel(
at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> weights,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
cache_hash_size_cumsum,
const at::PackedTensorAccessor64<int32_t, 1, at::RestrictPtrTraits>
cache_index_table_map,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
weights_offsets,
const at::PackedTensorAccessor32<uint8_t, 1, at::RestrictPtrTraits>
weights_tys,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
D_offsets,
const uint64_t* __restrict__ sorted_cache_sets,
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
cache_set_sorted_indices,
const int32_t* __restrict__ N_unique,
at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor64<uint8_t, 2, at::RestrictPtrTraits>
lxu_cache_weights,
const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
lfu_state) {
int32_t C = lxu_cache_state.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= *N_unique) {
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(n == 0 ||
(sorted_cache_sets[n - 1] >> kLFUCounterBits) !=
(sorted_cache_sets[n] >> kLFUCounterBits));
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
uint32_t cache_set = (sorted_cache_sets[n] >> kLFUCounterBits);
if (cache_set == C) {
// ignore the already-existing elements
return;
}
int32_t SL = 1;
while (n + SL < *N_unique &&
(sorted_cache_sets[n + SL] >> kLFUCounterBits) == cache_set) {
SL += 1;
}
// now, we need to insert the (unique!) values in indices[n:n + SL] into
// our slots.
int32_t slot = threadIdx.x;
int64_t current_idx = lxu_cache_state[cache_set][slot];
int64_t current_lfu_cost =
(current_idx != static_cast<int64_t>(kCacheStateInvalid))
? lfu_state[current_idx]
: -1;
int64_t costs[1] = {current_lfu_cost};
int32_t slots[1] = {slot};
BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots);
int32_t sorted_slot = slots[0];
int64_t sorted_lfu_cost = costs[0];
for (int32_t l = 0; l < min(SL, kWarpSize); ++l) {
int32_t insert_slot = shfl_sync(sorted_slot, l);
int64_t insert_current_lfu_cost = shfl_sync(sorted_lfu_cost, l);
index_t insert_idx = cache_set_sorted_indices[n + l];
int64_t insert_lfu_cost = lfu_state[insert_idx];
if (insert_current_lfu_cost > insert_lfu_cost) {
// don't insert.
// all subsequent `current_lfu_cost` values are greater, and all
// subsequent `insert_lfu_cost` values are smaller, so we can exit
// early here.
return;
}
int32_t t_insert = cache_index_table_map[insert_idx];
SparseType weight_ty_insert =
static_cast<SparseType>(weights_tys[t_insert]);
int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert];
int64_t weights_offset_insert = weights_offsets[t_insert];
int32_t D_start_insert = D_offsets[t_insert];
int32_t D_end_insert = D_offsets[t_insert + 1];
int32_t D_insert = D_end_insert - D_start_insert;
const int32_t D_insert_bytes =
padded_row_size_in_bytes(D_insert, weight_ty_insert);
// not empty
if (insert_current_lfu_cost != -1) {
      // ensure that only one thread (threadIdx.x == 0) reads/writes
      // lxu_cache_state
int64_t current_idx =
threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0;
current_idx = shfl_sync(current_idx, 0);
int32_t t_current = cache_index_table_map[current_idx];
SparseType weight_ty_current =
static_cast<SparseType>(weights_tys[t_current]);
int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current];
int64_t weights_offset_current = weights_offsets[t_current];
int32_t D_start_current = D_offsets[t_current];
int32_t D_end_current = D_offsets[t_current + 1];
int32_t D_current = D_end_current - D_start_current;
const int32_t D_current_bytes =
padded_row_size_in_bytes(D_current, weight_ty_current);
auto row =
&weights[weights_offset_current + idx_current * D_current_bytes + 0];
auto cache_row =
&lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
// Evict the cache
for (int32_t d = threadIdx.x; d < D_current_bytes; d += blockDim.x) {
row[d] = cache_row[d]; // uint8_t access
}
}
// insert into cache
auto row =
&weights[weights_offset_insert + idx_insert * D_insert_bytes + 0];
auto cache_row = &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0];
for (int32_t d = threadIdx.x; d < D_insert_bytes; d += blockDim.x) {
cache_row[d] = row[d];
}
if (threadIdx.x == 0) {
lxu_cache_state[cache_set][insert_slot] = insert_idx;
}
}
}
void lfu_cache_insert_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor sorted_cache_sets,
Tensor cache_set_sorted_unique_indices,
Tensor unique_indices_length,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
  TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(sorted_cache_sets);
TENSOR_ON_CUDA_GPU(cache_set_sorted_unique_indices);
TENSOR_ON_CUDA_GPU(unique_indices_length);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
int32_t N = cache_set_sorted_unique_indices.numel();
AT_DISPATCH_INDEX_TYPES(
cache_set_sorted_unique_indices.scalar_type(),
"lfu_cache_insert_byte_cuda",
[&]() {
lfu_cache_insert_byte_kernel<<<
div_round_up(N, kCacheMaxThreads / kWarpSize),
dim3(kWarpSize, kCacheMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(),
cache_hash_size_cumsum
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
cache_index_table_map
.packed_accessor64<int32_t, 1, at::RestrictPtrTraits>(),
weights_offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
(uint64_t*)sorted_cache_sets.data_ptr<int64_t>(),
cache_set_sorted_unique_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
unique_indices_length.data_ptr<int32_t>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_weights
.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(),
lfu_state.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>());
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
void lfu_cache_populate_byte_cuda(
Tensor weights,
Tensor cache_hash_size_cumsum,
int64_t total_cache_hash_size,
Tensor cache_index_table_map,
Tensor weights_offsets,
Tensor weights_tys,
Tensor D_offsets,
Tensor linear_cache_indices,
Tensor lxu_cache_state,
Tensor lxu_cache_weights,
Tensor lfu_state) {
TENSOR_ON_CUDA_GPU(weights);
TENSOR_ON_CUDA_GPU(cache_hash_size_cumsum);
TENSOR_ON_CUDA_GPU(cache_index_table_map);
TENSOR_ON_CUDA_GPU(weights_offsets);
  TENSOR_ON_CUDA_GPU(weights_tys);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(lfu_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(weights.get_device());
TORCH_CHECK(
linear_cache_indices.numel() < std::numeric_limits<int32_t>::max());
if (linear_cache_indices.numel() == 0) {
// nothing to do
return;
}
  // get unique indices
Tensor unique_indices;
Tensor unique_indices_length;
c10::optional<Tensor> unique_indices_count;
std::tie(unique_indices, unique_indices_length, unique_indices_count) =
get_unique_indices_cuda(
linear_cache_indices, total_cache_hash_size, true);
// update lfu counts
lfu_update_counts_cuda(
unique_indices, unique_indices_length, *unique_indices_count, lfu_state);
// find uncached indices
auto cache_sets_and_unique_indices = lfu_cache_find_uncached_cuda(
unique_indices,
unique_indices_length,
total_cache_hash_size,
lxu_cache_state,
lfu_state);
auto sorted_cache_sets = cache_sets_and_unique_indices.first;
auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second;
// insert caching weights
lfu_cache_insert_byte_cuda(
weights,
cache_hash_size_cumsum,
cache_index_table_map,
weights_offsets,
weights_tys,
D_offsets,
sorted_cache_sets,
cache_set_sorted_unique_indices,
unique_indices_length,
lxu_cache_state,
lxu_cache_weights,
lfu_state);
}
template <typename index_t>
__global__ __launch_bounds__(kMaxThreads) void lxu_cache_lookup_kernel(
const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits>
linear_cache_indices,
const at::PackedTensorAccessor32<int64_t, 2, at::RestrictPtrTraits>
lxu_cache_state,
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
lxu_cache_locations) {
const int32_t C = lxu_cache_state.size(0);
const int32_t N = linear_cache_indices.size(0);
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n >= N) {
return;
}
int64_t idx = linear_cache_indices[n];
int32_t cache_set = cache_slot(idx, C);
auto slot = threadIdx.x;
bool found = (__ldg((&lxu_cache_state[cache_set][0]) + slot) == idx);
if (found) {
lxu_cache_locations[n] = cache_set * kWarpSize + slot;
}
#ifdef __HIP_PLATFORM_HCC__
// FIXME: __any_sync with mask isn't supported by HIP yet.
// See https://fburl.com/fvy7j0lq for the similar context.
// assert false here with https://fburl.com/pfm7enw2
assert(false);
if (!__any(found)) {
#else
if (!__any_sync(0xFFFFFFFF, found)) {
#endif
if (threadIdx.x == 0) {
lxu_cache_locations[n] = kCacheLocationMissing;
}
}
}
Tensor lxu_cache_lookup_cuda(
Tensor linear_cache_indices,
Tensor lxu_cache_state) {
TENSOR_ON_CUDA_GPU(linear_cache_indices);
TENSOR_ON_CUDA_GPU(lxu_cache_state);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(linear_cache_indices.get_device());
const auto N = linear_cache_indices.numel();
auto lxu_cache_locations = empty_like(
linear_cache_indices, linear_cache_indices.options().dtype(at::kInt));
if (linear_cache_indices.numel() == 0) {
// nothing to do
return lxu_cache_locations;
}
const dim3 threads(kWarpSize, kMaxThreads / kWarpSize);
const dim3 blocks(div_round_up(N, kMaxThreads / kWarpSize));
AT_DISPATCH_INDEX_TYPES(
linear_cache_indices.scalar_type(), "lxu_cache_lookup_cuda", [&]() {
lxu_cache_lookup_kernel<<<
blocks,
threads,
0,
at::cuda::getCurrentCUDAStream()>>>(
linear_cache_indices
.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(),
lxu_cache_state
.packed_accessor32<int64_t, 2, at::RestrictPtrTraits>(),
lxu_cache_locations
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return lxu_cache_locations;
}
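// End-to-end sketch of how the pieces above are typically composed for one
// batch (illustration only, not an API exported by this file): populate the
// LRU cache for the requested rows, then resolve each linearized index to a
// cache slot or kCacheLocationMissing. The tensor arguments are assumed to be
// prepared by the caller exactly as for the functions above.
//
//   lru_cache_populate_byte_cuda(
//       weights, cache_hash_size_cumsum, total_cache_hash_size,
//       cache_index_table_map, weights_offsets, weights_tys, D_offsets,
//       linear_cache_indices, lxu_cache_state, lxu_cache_weights,
//       time_stamp, lru_state);
//   auto lxu_cache_locations =
//       lxu_cache_lookup_cuda(linear_cache_indices, lxu_cache_state);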
|
806950387e27bd9a09c2c35a5ce4399d48abe764.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Treecode.h"
#define NWARPS_OCTREE2 3
#define NWARPS2 NWARPS_OCTREE2
#define NWARPS (1<<NWARPS2)
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "cuda_primitives.h"
namespace treeBuild
{
static __forceinline__ __device__ void computeGridAndBlockSize(dim3 &grid, dim3 &block, const int np)
{
const int NTHREADS = (1<<NWARPS_OCTREE2) * WARP_SIZE;
block = dim3(NTHREADS);
assert(np > 0);
grid = dim3(min(max(np/(NTHREADS*4),1), 512));
}
__device__ unsigned int retirementCount = 0;
__constant__ int d_node_max;
__constant__ int d_cell_max;
__device__ unsigned int nnodes = 0;
__device__ unsigned int nleaves = 0;
__device__ unsigned int nlevels = 0;
__device__ unsigned int nbodies_leaf = 0;
__device__ unsigned int ncells = 0;
__device__ int *memPool;
__device__ CellData *cellDataList;
__device__ void *ptclVel_tmp;
template<int NTHREAD2>
static __device__ float2 minmax_block(float2 sum)
{
extern __shared__ float shdata[];
float *shMin = shdata;
float *shMax = shdata + (1<<NTHREAD2);
const int tid = threadIdx.x;
shMin[tid] = sum.x;
shMax[tid] = sum.y;
__syncthreads();
#pragma unroll
for (int i = NTHREAD2-1; i >= 6; i--)
{
const int offset = 1 << i;
if (tid < offset)
{
shMin[tid] = sum.x = fminf(sum.x, shMin[tid + offset]);
shMax[tid] = sum.y = fmaxf(sum.y, shMax[tid + offset]);
}
__syncthreads();
}
if (tid < 32)
{
volatile float *vshMin = shMin;
volatile float *vshMax = shMax;
#pragma unroll
for (int i = 5; i >= 0; i--)
{
const int offset = 1 << i;
vshMin[tid] = sum.x = fminf(sum.x, vshMin[tid + offset]);
vshMax[tid] = sum.y = fmaxf(sum.y, vshMax[tid + offset]);
}
}
__syncthreads();
return sum;
}
template<const int NTHREAD2, typename T>
static __global__ void computeBoundingBox(
const int n,
__out Position<T> *minmax_ptr,
__out Box<T> *box_ptr,
const Particle4<T> *ptclPos)
{
const int NTHREAD = 1<<NTHREAD2;
const int NBLOCK = NTHREAD;
Position<T> bmin(T(+1e10)), bmax(T(-1e10));
const int nbeg = blockIdx.x * NTHREAD + threadIdx.x;
for (int i = nbeg; i < n; i += NBLOCK*NTHREAD)
if (i < n)
{
const Particle4<T> p = ptclPos[i];
const Position<T> pos(p.x(), p.y(), p.z());
bmin = Position<T>::min(bmin, pos);
bmax = Position<T>::max(bmax, pos);
}
float2 res;
res = minmax_block<NTHREAD2>(make_float2(bmin.x, bmax.x)); bmin.x = res.x; bmax.x = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.y, bmax.y)); bmin.y = res.x; bmax.y = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.z, bmax.z)); bmin.z = res.x; bmax.z = res.y;
if (threadIdx.x == 0)
{
minmax_ptr[blockIdx.x ] = bmin;
minmax_ptr[blockIdx.x + NBLOCK] = bmax;
}
__shared__ int lastBlock; /* with bool, doesn't compile in CUDA 6.0 */
__threadfence();
__syncthreads();
if (threadIdx.x == 0)
{
const int ticket = atomicInc(&retirementCount, NBLOCK);
lastBlock = (ticket == NBLOCK - 1);
}
__syncthreads();
#if 1
if (lastBlock)
{
bmin = minmax_ptr[threadIdx.x];
bmax = minmax_ptr[threadIdx.x + NBLOCK];
float2 res;
res = minmax_block<NTHREAD2>(make_float2(bmin.x, bmax.x)); bmin.x = res.x; bmax.x = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.y, bmax.y)); bmin.y = res.x; bmax.y = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.z, bmax.z)); bmin.z = res.x; bmax.z = res.y;
__syncthreads();
if (threadIdx.x == 0)
{
#if 0
printf("bmin= %g %g %g \n", bmin.x, bmin.y, bmin.z);
printf("bmax= %g %g %g \n", bmax.x, bmax.y, bmax.z);
#endif
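        /* snap the box: round the half-size to a power of two and quantize the
         * centre to a grid of hsize/2^NMAXLEVEL, so the centres of the child
         * boxes obtained by repeated halving stay on the same quantized grid */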
const Position<T> cvec((bmax.x+bmin.x)*T(0.5), (bmax.y+bmin.y)*T(0.5), (bmax.z+bmin.z)*T(0.5));
const Position<T> hvec((bmax.x-bmin.x)*T(0.5), (bmax.y-bmin.y)*T(0.5), (bmax.z-bmin.z)*T(0.5));
const T h = fmax(hvec.z, fmax(hvec.y, hvec.x));
T hsize = T(1.0);
while (hsize > h) hsize *= T(0.5);
while (hsize < h) hsize *= T(2.0);
const int NMAXLEVEL = 20;
const T hquant = hsize / T(1<<NMAXLEVEL);
const long long nx = (long long)(cvec.x/hquant);
const long long ny = (long long)(cvec.y/hquant);
const long long nz = (long long)(cvec.z/hquant);
const Position<T> centre(hquant * T(nx), hquant * T(ny), hquant * T(nz));
*box_ptr = Box<T>(centre, hsize);
retirementCount = 0;
}
}
#endif
}
/*******************/
template<int NLEAF, typename T, bool STOREIDX>
static __global__ void
__launch_bounds__( 256, 8)
buildOctant(
Box<T> box,
const int cellParentIndex,
const int cellIndexBase,
const int octantMask,
__out int *octCounterBase,
Particle4<T> *ptcl,
Particle4<T> *buff,
const int level = 0)
{
typedef typename vec<4,T>::type T4;
/* compute laneIdx & warpIdx for each of the threads:
* the thread block contains only 8 warps
* a warp is responsible for a single octant of the cell
*/
const int laneIdx = threadIdx.x & (WARP_SIZE-1);
const int warpIdx = threadIdx.x >> WARP_SIZE2;
/* We launch a 2D grid:
     * the y-coordinate carries info about which parent cell to process
* the x-coordinate is just a standard approach for CUDA parallelism
*/
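    /* octantMask packs, for every child scheduled for splitting, its 3-bit
     * octant id at position 3*k, where k is the child's rank among the children
     * being split (see the mask construction near the child-kernel launch
     * below); blockIdx.y selects which packed entry this block decodes */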
const int octant2process = (octantMask >> (3*blockIdx.y)) & 0x7;
    /* get the pointer to the atomic data for a given octant */
int *octCounter = octCounterBase + blockIdx.y*(8+8+8+64+8);
/* read data about the current cell */
const int data = octCounter[laneIdx];
const int nBeg = __shfl(data, 1, WARP_SIZE);
const int nEnd = __shfl(data, 2, WARP_SIZE);
/* if we are not at the root level, compute the geometric box
* of the cell */
if (!STOREIDX)
box = ChildBox(box, octant2process);
    /* counts the number of particles in each suboctant of each child octant */
__shared__ int nShChildrenFine[NWARPS][9][8];
__shared__ int nShChildren[8][8];
Box<T> *shChildBox = (Box<T>*)&nShChildren[0][0];
int *shdata = (int*)&nShChildrenFine[0][0][0];
#pragma unroll
for (int i = 0; i < 8*9*NWARPS; i += NWARPS*WARP_SIZE)
if (i + threadIdx.x < 8*9*NWARPS)
shdata[i + threadIdx.x] = 0;
if (laneIdx == 0 && warpIdx < 8)
shChildBox[warpIdx] = ChildBox(box, warpIdx);
__syncthreads();
/* process particle array */
const int nBeg_block = nBeg + blockIdx.x * blockDim.x;
for (int i = nBeg_block; i < nEnd; i += gridDim.x * blockDim.x)
{
Particle4<T> p4 = ptcl[min(i+threadIdx.x, nEnd-1)];
int p4octant = p4.get_oct();
if (STOREIDX)
{
p4.set_idx(i + threadIdx.x);
p4octant = Octant(box.centre, Position<T>(p4.x(), p4.y(), p4.z()));
}
p4octant = i+threadIdx.x < nEnd ? p4octant : 0xF;
      /* compute the suboctant of the octant into which the particle will fall */
if (p4octant < 8)
{
const int p4subOctant = Octant(shChildBox[p4octant].centre, Position<T>(p4.x(), p4.y(), p4.z()));
p4.set_oct(p4subOctant);
}
      /* compute the number of particles in each of the octants that will be processed by the thread block */
int np = 0;
#pragma unroll
for (int octant = 0; octant < 8; octant++)
{
const int sum = warpBinReduce(p4octant == octant);
if (octant == laneIdx)
np = sum;
}
      /* increment atomic counters in a single instruction for the participating thread blocks */
int addrB0;
if (laneIdx < 8)
addrB0 = atomicAdd(&octCounter[8+8+laneIdx], np);
/* compute addresses where to write data */
int cntr = 32;
int addrW = -1;
#pragma unroll
for (int octant = 0; octant < 8; octant++)
{
const int sum = warpBinReduce(p4octant == octant);
if (sum > 0)
{
const int offset = warpBinExclusiveScan1(p4octant == octant);
const int addrB = __shfl(addrB0, octant, WARP_SIZE);
if (p4octant == octant)
addrW = addrB + offset;
cntr -= sum;
if (cntr == 0) break;
}
}
/* write the data in a single instruction */
if (addrW >= 0)
buff[addrW] = p4;
      /* count how many particles fall into the suboctants of each octant */
cntr = 32;
#pragma unroll
for (int octant = 0; octant < 8; octant++)
{
if (cntr == 0) break;
const int sum = warpBinReduce(p4octant == octant);
if (sum > 0)
{
const int subOctant = p4octant == octant ? p4.get_oct() : -1;
#pragma unroll
for (int k = 0; k < 8; k += 4)
{
const int4 sum = make_int4(
warpBinReduce(k+0 == subOctant),
warpBinReduce(k+1 == subOctant),
warpBinReduce(k+2 == subOctant),
warpBinReduce(k+3 == subOctant));
if (laneIdx == 0)
{
int4 value = *(int4*)&nShChildrenFine[warpIdx][octant][k];
value.x += sum.x;
value.y += sum.y;
value.z += sum.z;
value.w += sum.w;
*(int4*)&nShChildrenFine[warpIdx][octant][k] = value;
}
}
cntr -= sum;
}
}
}
__syncthreads();
if (warpIdx >= 8) return;
#pragma unroll
for (int k = 0; k < 8; k += 4)
{
int4 nSubOctant = laneIdx < NWARPS ? (*(int4*)&nShChildrenFine[laneIdx][warpIdx][k]) : make_int4(0,0,0,0);
#pragma unroll
for (int i = NWARPS2-1; i >= 0; i--)
{
nSubOctant.x += __shfl_xor(nSubOctant.x, 1<<i, NWARPS);
nSubOctant.y += __shfl_xor(nSubOctant.y, 1<<i, NWARPS);
nSubOctant.z += __shfl_xor(nSubOctant.z, 1<<i, NWARPS);
nSubOctant.w += __shfl_xor(nSubOctant.w, 1<<i, NWARPS);
}
if (laneIdx == 0)
*(int4*)&nShChildren[warpIdx][k] = nSubOctant;
}
__syncthreads();
if (laneIdx < 8)
if (nShChildren[warpIdx][laneIdx] > 0)
atomicAdd(&octCounter[8+16+warpIdx*8 + laneIdx], nShChildren[warpIdx][laneIdx]);
    __syncthreads(); /* must be present, otherwise race conditions occur between parent & children */
    /* detect the last thread block for each unique y-coordinate of the grid:
     * mind, this cannot be done on the host, because we don't detect the last
     * block of the whole grid, but the last x-block for each y-coordinate of the grid;
     * this should increase the degree of parallelism
*/
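    /* example: with gridDim.x == 4, tickets 0..3 are handed out per parent cell
     * via the atomicAdd on octCounter[0] below; the block that draws ticket 3
     * knows it is the last x-block for this y-coordinate, i.e. its sibling
     * blocks have already performed their atomic updates above */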
int *shmem = &nShChildren[0][0];
if (warpIdx == 0)
shmem[laneIdx] = 0;
int &lastBlock = shmem[0];
if (threadIdx.x == 0)
{
const int ticket = atomicAdd(octCounter, 1);
lastBlock = (ticket == gridDim.x-1);
}
__syncthreads();
if (!lastBlock) return;
__syncthreads();
/* okay, we are in the last thread block, do the analysis and decide what to do next */
if (warpIdx == 0)
shmem[laneIdx] = 0;
if (threadIdx.x == 0)
atomicCAS(&nlevels, level, level+1);
__syncthreads();
    /* compute the beginning and end addresses of the sorted particles in the child cell */
const int nCell = __shfl(data, 8+warpIdx, WARP_SIZE);
const int nEnd1 = octCounter[8+8+warpIdx];
const int nBeg1 = nEnd1 - nCell;
if (laneIdx == 0)
shmem[warpIdx] = nCell;
__syncthreads();
const int npCell = laneIdx < 8 ? shmem[laneIdx] : 0;
    /* compute the number of children that need to be further split, and compute their offsets */
const int2 nSubNodes = warpBinExclusiveScan(npCell > NLEAF);
const int2 nLeaves = warpBinExclusiveScan(npCell > 0 && npCell <= NLEAF);
if (warpIdx == 0 && laneIdx < 8)
{
shmem[8 +laneIdx] = nSubNodes.x;
shmem[16+laneIdx] = nLeaves.x;
}
int nCellmax = npCell;
#pragma unroll
for (int i = 2; i >= 0; i--)
nCellmax = max(nCellmax, __shfl_xor(nCellmax, 1<<i, WARP_SIZE));
    /* if there is at least one cell to split, increment the number of nodes */
if (threadIdx.x == 0 && nSubNodes.y > 0)
{
shmem[16+8] = atomicAdd(&nnodes,nSubNodes.y);
#if 1 /* temp solution, a better one is to use RingBuffer */
assert(shmem[16+8] < d_node_max);
#endif
}
    /* write the linking info: parent, children and the particle list */
const int nChildrenCell = warpBinReduce(npCell > 0);
if (threadIdx.x == 0 && nChildrenCell > 0)
{
const int cellFirstChildIndex = atomicAdd(&ncells, nChildrenCell);
#if 1
assert(cellFirstChildIndex + nChildrenCell < d_cell_max);
#endif
/*** keep in mind, the 0-level will be overwritten ***/
assert(nChildrenCell > 0);
assert(nChildrenCell <= 8);
const CellData cellData(level,cellParentIndex, nBeg, nEnd, cellFirstChildIndex, nChildrenCell-1);
assert(cellData.first() < ncells);
assert(cellData.isNode());
cellDataList[cellIndexBase + blockIdx.y] = cellData;
shmem[16+9] = cellFirstChildIndex;
}
__syncthreads();
const int cellFirstChildIndex = shmem[16+9];
    /* compute the atomic data offset for cells that need to be split */
const int next_node = shmem[16+8];
int *octCounterNbase = &memPool[next_node*(8+8+8+64+8)];
const int nodeOffset = shmem[8 +warpIdx];
const int leafOffset = shmem[16+warpIdx];
    /* if the cell needs to be split, populate its shared atomic data */
if (nCell > NLEAF)
{
int *octCounterN = octCounterNbase + nodeOffset*(8+8+8+64+8);
/* number of particles in each cell's subcells */
const int nSubCell = laneIdx < 8 ? octCounter[8+16+warpIdx*8 + laneIdx] : 0;
/* compute offsets */
int cellOffset = nSubCell;
#pragma unroll
for(int i = 0; i < 3; i++) /* log2(8) steps */
cellOffset = shfl_scan_add_step(cellOffset, 1 << i);
cellOffset -= nSubCell;
/* store offset in memory */
cellOffset = __shfl_up(cellOffset, 8, WARP_SIZE);
if (laneIdx < 8) cellOffset = nSubCell;
else cellOffset += nBeg1;
cellOffset = __shfl_up(cellOffset, 8, WARP_SIZE);
if (laneIdx < 8) cellOffset = 0;
if (laneIdx == 1) cellOffset = nBeg1;
if (laneIdx == 2) cellOffset = nEnd1;
if (laneIdx < 24)
octCounterN[laneIdx] = cellOffset;
}
/***************************/
/* launch child kernel */
/***************************/
    /* warps cooperate so that only one kernel needs to be launched per thread block,
     * with a larger degree of parallelism */
if (nSubNodes.y > 0 && warpIdx == 0)
{
/* build octant mask */
int octant_mask = npCell > NLEAF ? (laneIdx << (3*nSubNodes.x)) : 0;
#pragma unroll
for (int i = 4; i >= 0; i--)
octant_mask |= __shfl_xor(octant_mask, 1<<i, WARP_SIZE);
if (threadIdx.x == 0)
{
dim3 grid, block;
computeGridAndBlockSize(grid, block, nCellmax);
#if 0
hipStream_t stream;
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
grid.y = nSubNodes.y; /* each y-coordinate of the grid will be busy for each parent cell */
atomicAdd(&n_scheduled,1);
atomicAdd(&n_in_que, 1);
atomicMax(&n_in_que_max, n_in_que);
#if defined(FASTMODE) && NWARPS==8
if (nCellmax <= block.x)
{
grid.x = 1;
hipLaunchKernelGGL(( buildOctantSingle<NLEAF,T>), dim3(grid),dim3(block),0,stream,
box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
}
else
#endif
hipLaunchKernelGGL(( buildOctant<NLEAF,T,false>), dim3(grid),dim3(block),0,stream,
box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
#else
grid.y = nSubNodes.y; /* each y-coordinate of the grid will be busy for each parent cell */
#if defined(FASTMODE) && NWARPS==8
if (nCellmax <= block.x)
{
grid.x = 1;
hipLaunchKernelGGL(( buildOctantSingle<NLEAF,T>), dim3(grid),dim3(block), 0, 0,
box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
}
else
#endif
hipLaunchKernelGGL(( buildOctant<NLEAF,T,false>), dim3(grid),dim3(block), 0, 0,
box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
#endif
const hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf(" launch failed 1: %s level= %d n =%d \n", hipGetErrorString(err), level);
assert(0);
}
}
}
/******************/
/* process leaves */
/******************/
if (nCell <= NLEAF && nCell > 0)
{
if (laneIdx == 0)
{
atomicAdd(&nleaves,1);
atomicAdd(&nbodies_leaf, nEnd1-nBeg1);
const CellData leafData(level+1, cellIndexBase+blockIdx.y, nBeg1, nEnd1);
assert(!leafData.isNode());
cellDataList[cellFirstChildIndex + nSubNodes.y + leafOffset] = leafData;
}
if (!(level&1))
{
for (int i = nBeg1+laneIdx; i < nEnd1; i += WARP_SIZE)
if (i < nEnd1)
{
Particle4<T> pos = buff[i];
Particle4<T> vel = ((Particle4<T>*)ptclVel_tmp)[pos.get_idx()];
#ifdef PSHFL_SANITY_CHECK
pos.mass() = T(pos.get_idx());
#else
pos.mass() = vel.mass();
#endif
ptcl[i] = pos;
buff[i] = vel;
}
}
else
{
for (int i = nBeg1+laneIdx; i < nEnd1; i += WARP_SIZE)
if (i < nEnd1)
{
Particle4<T> pos = buff[i];
Particle4<T> vel = ((Particle4<T>*)ptclVel_tmp)[pos.get_idx()];
#ifdef PSHFL_SANITY_CHECK
pos.mass() = T(pos.get_idx());
#else
pos.mass() = vel.mass();
#endif
buff[i] = pos;
ptcl[i] = vel;
}
}
}
}
template<typename T>
static __global__ void countAtRootNode(
const int n,
__out int *octCounter,
const Box<T> box,
const Particle4<T> *ptclPos)
{
int np_octant[8] = {0};
const int beg = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = beg; i < n; i += gridDim.x * blockDim.x)
if (i < n)
{
const Particle4<T> p = ptclPos[i];
const Position<T> pos(p.x(), p.y(), p.z());
const int octant = Octant(box.centre, pos);
np_octant[0] += (octant == 0);
np_octant[1] += (octant == 1);
np_octant[2] += (octant == 2);
np_octant[3] += (octant == 3);
np_octant[4] += (octant == 4);
np_octant[5] += (octant == 5);
np_octant[6] += (octant == 6);
np_octant[7] += (octant == 7);
};
const int laneIdx = threadIdx.x & (WARP_SIZE-1);
#pragma unroll
for (int k = 0; k < 8; k++)
{
int np = np_octant[k];
#pragma unroll
for (int i = 4; i >= 0; i--)
np += __shfl_xor(np, 1<<i, WARP_SIZE);
if (laneIdx == 0)
atomicAdd(&octCounter[8+k],np);
}
}
template<int NLEAF, typename T>
static __global__ void buildOctree(
const int n,
const Box<T> *domain,
CellData *d_cellDataList,
int *stack_memory_pool,
Particle4<T> *ptcl,
Particle4<T> *buff,
Particle4<T> *d_ptclVel,
int *ncells_return = NULL)
{
cellDataList = d_cellDataList;
ptclVel_tmp = (void*)d_ptclVel;
memPool = stack_memory_pool;
#if 0
printf("n= %d\n", n);
printf("d_node_max= %d\n", d_node_max);
printf("d_cell_max= %d\n", d_cell_max);
printf("GPU: box_centre= %g %g %g hsize= %g\n",
domain->centre.x,
domain->centre.y,
domain->centre.z,
domain->hsize);
#endif
int *octCounter = new int[8+8];
for (int k = 0; k < 16; k++)
octCounter[k] = 0;
hipLaunchKernelGGL(( countAtRootNode<T>), dim3(256), dim3(256), 0, 0, n, octCounter, *domain, ptcl);
assert(hipGetLastError() == hipSuccess);
hipDeviceSynchronize();
#if 1
int total = 0;
for (int k = 8; k < 16; k++)
{
printf("octCounter[%d]= %d\n", k-8, octCounter[k]);
total += octCounter[k];
}
printf("total= %d n= %d\n", total, n);
#endif
int *octCounterN = new int[8+8+8+64+8];
#pragma unroll
for (int k = 0; k < 8; k++)
{
octCounterN[ k] = 0;
octCounterN[8+ k] = octCounter[8+k ];
octCounterN[8+8 +k] = k == 0 ? 0 : octCounterN[8+8+k-1] + octCounterN[8+k-1];
octCounterN[8+16+k] = 0;
}
#pragma unroll
for (int k = 8; k < 64; k++)
octCounterN[8+16+k] = 0;
#if 0
for (int k = 0; k < 8; k++)
printf("k= %d n = %d offset= %d \n",
k, octCounterN[8+k], octCounterN[8+8+k]);
#endif
#ifdef IOCOUNT
io_words = 0;
#endif
nnodes = 0;
nleaves = 0;
nlevels = 0;
ncells = 0;
nbodies_leaf = 0;
octCounterN[1] = 0;
octCounterN[2] = n;
dim3 grid, block;
computeGridAndBlockSize(grid, block, n);
#if 1
hipLaunchKernelGGL(( buildOctant<NLEAF,T,true>), dim3(grid), dim3(block), 0, 0,
*domain, 0, 0, 0, octCounterN, ptcl, buff);
assert(hipDeviceSynchronize() == hipSuccess);
#endif
printf(" nptcl = %d\n", n);
printf(" nb_leaf= %d\n", nbodies_leaf);
printf(" nnodes = %d\n", nnodes);
printf(" nleaves= %d\n", nleaves);
printf(" ncells= %d\n", ncells);
printf(" nlevels= %d\n", nlevels);
if (ncells_return != NULL)
*ncells_return = ncells;
#ifdef IOCOUNT
printf(" io= %g MB \n" ,io_words*4.0/1024.0/1024.0);
#endif
delete [] octCounter;
delete [] octCounterN;
}
static __global__ void
get_cell_levels(const int n, const CellData cellList[], CellData cellListOut[], int key[], int value[])
{
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n) return;
const CellData cell = cellList[idx];
key [idx] = cell.level();
value[idx] = idx;
cellListOut[idx] = cell;
}
static __global__ void
write_newIdx(const int n, const int value[], int moved_to_idx[])
{
const int newIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (newIdx >= n) return;
const int oldIdx = value[newIdx];
moved_to_idx[oldIdx] = newIdx;
}
static __global__ void
compute_level_begIdx(const int n, const int levels[], int2 level_begendIdx[])
{
const int gidx = blockIdx.x*blockDim.x + threadIdx.x;
if (gidx >= n) return;
extern __shared__ int shLevels[];
const int tid = threadIdx.x;
shLevels[tid+1] = levels[gidx];
int shIdx = 0;
int gmIdx = max(blockIdx.x*blockDim.x-1,0);
if (tid == 1)
{
shIdx = blockDim.x+1;
gmIdx = min(blockIdx.x*blockDim.x + blockDim.x,n-1);
}
if (tid < 2)
shLevels[shIdx] = levels[gmIdx];
__syncthreads();
const int idx = tid+1;
const int currLevel = shLevels[idx];
const int prevLevel = shLevels[idx-1];
if (currLevel != prevLevel || gidx == 0)
level_begendIdx[currLevel].x = gidx;
const int nextLevel = shLevels[idx+1];
if (currLevel != nextLevel || gidx == n-1)
level_begendIdx[currLevel].y = gidx;
}
__device__ unsigned int leafIdx_counter = 0;
static __global__ void
shuffle_cells(const int n, const int value[], const int moved_to_idx[], const CellData cellListIn[], CellData cellListOut[])
{
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n) return;
const int mapIdx = value[idx];
CellData cell = cellListIn[mapIdx];
if (cell.isNode())
{
const int firstOld = cell.first();
const int firstNew = moved_to_idx[firstOld];
cell.update_first(firstNew);
}
if (cell.parent() > 0)
cell.update_parent(moved_to_idx[cell.parent()]);
cellListOut[idx] = cell;
if (threadIdx.x == 0 && blockIdx.x == 0)
leafIdx_counter = 0;
}
template<int NTHREAD2>
static __global__
void collect_leaves(const int n, const CellData *cellList, int *leafList)
{
const int gidx = blockDim.x*blockIdx.x + threadIdx.x;
const CellData cell = cellList[min(gidx,n-1)];
__shared__ int shdata[1<<NTHREAD2];
int value = gidx < n && cell.isLeaf();
shdata[threadIdx.x] = value;
#pragma unroll
for (int offset2 = 0; offset2 < NTHREAD2; offset2++)
{
const int offset = 1 << offset2;
__syncthreads();
if (threadIdx.x >= offset)
value += shdata[threadIdx.x - offset];
__syncthreads();
shdata[threadIdx.x] = value;
}
const int nwrite = shdata[threadIdx.x];
const int scatter = nwrite - (gidx < n && cell.isLeaf());
__syncthreads();
if (threadIdx.x == blockDim.x-1 && nwrite > 0)
shdata[0] = atomicAdd(&leafIdx_counter, nwrite);
__syncthreads();
if (cell.isLeaf())
leafList[shdata[0] + scatter] = gidx;
}
}
template<typename real_t>
void Treecode<real_t>::buildTree(const int nLeaf)
{
this->nLeaf = nLeaf;
assert(nLeaf == 16 || nLeaf == 24 || nLeaf == 32 || nLeaf == 48 || nLeaf == 64);
/* compute bounding box */
{
const int NTHREAD2 = 8;
const int NTHREAD = 1<<NTHREAD2;
const int NBLOCK = NTHREAD;
assert(2*NBLOCK <= 2048); /* see Treecode constructor for d_minmax allocation */
hipDeviceSynchronize();
kernelSuccess("cudaDomainSize0");
const double t0 = rtc();
hipLaunchKernelGGL(( treeBuild::computeBoundingBox<NTHREAD2,real_t>), dim3(NBLOCK),dim3(NTHREAD),NTHREAD*sizeof(float2), 0,
nPtcl, d_minmax, d_domain, d_ptclPos);
kernelSuccess("cudaDomainSize");
const double dt = rtc() - t0;
fprintf(stderr, " cudaDomainSize done in %g sec : %g Mptcl/sec\n", dt, nPtcl/1e6/dt);
}
/*** build tree ***/
CUDA_SAFE_CALL(hipMemcpyToSymbol(treeBuild::d_node_max, &node_max, sizeof(int), 0, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpyToSymbol(treeBuild::d_cell_max, &cell_max, sizeof(int), 0, hipMemcpyHostToDevice));
// hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount,16384);
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<16,real_t,true>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<16,real_t,false>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<24,real_t,true>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<24,real_t,false>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<32,real_t,true>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<32,real_t,false>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<48,real_t,true>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<48,real_t,false>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<64,real_t,true>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipFuncSetCacheConfig(&treeBuild::buildOctant<64,real_t,false>, hipFuncCachePreferShared));
CUDA_SAFE_CALL(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte));
{
CUDA_SAFE_CALL(hipMemset(d_stack_memory_pool,0,stack_size*sizeof(int)));
hipDeviceSynchronize();
const double t0 = rtc();
switch(nLeaf)
{
case 16:
hipLaunchKernelGGL(( treeBuild::buildOctree<16,real_t>), dim3(1),dim3(1), 0, 0,
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 24:
hipLaunchKernelGGL(( treeBuild::buildOctree<24,real_t>), dim3(1),dim3(1), 0, 0,
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 32:
hipLaunchKernelGGL(( treeBuild::buildOctree<32,real_t>), dim3(1),dim3(1), 0, 0,
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 48:
hipLaunchKernelGGL(( treeBuild::buildOctree<48,real_t>), dim3(1),dim3(1), 0, 0,
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 64:
hipLaunchKernelGGL(( treeBuild::buildOctree<64,real_t>), dim3(1),dim3(1), 0, 0,
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
default:
assert(0);
}
kernelSuccess("buildOctree");
const double t1 = rtc();
const double dt = t1 - t0;
CUDA_SAFE_CALL(hipMemcpyFromSymbol(&nLevels, treeBuild::nlevels, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyFromSymbol(&nCells, treeBuild::ncells, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyFromSymbol(&nNodes, treeBuild::nnodes, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyFromSymbol(&nLeaves, treeBuild::nleaves, sizeof(int)));
fprintf(stderr, " buildOctree done in %g sec : %g Mptcl/sec\n", dt, nPtcl/1e6/dt);
std::swap(d_ptclPos_tmp.ptr, d_ptclVel.ptr);
}
/* sort nodes by level */
{
hipDeviceSynchronize();
const double t0 = rtc();
const int nthread = 256;
const int nblock = (nCells-1)/nthread + 1;
hipLaunchKernelGGL(( treeBuild::get_cell_levels), dim3(nblock),dim3(nthread), 0, 0, nCells, d_cellDataList, d_cellDataList_tmp, d_key, d_value);
thrust::device_ptr<int> keys_beg(d_key.ptr);
thrust::device_ptr<int> keys_end(d_key.ptr + nCells);
thrust::device_ptr<int> vals_beg(d_value.ptr);
thrust::stable_sort_by_key(keys_beg, keys_end, vals_beg);
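/* after the stable sort, cells of the same level are contiguous and d_value maps each new (sorted) position back to the old cell index */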
/* compute beginning & end of each level */
hipLaunchKernelGGL(( treeBuild::compute_level_begIdx), dim3(nblock),dim3(nthread),(nthread+2)*sizeof(int), 0, nCells, d_key, d_level_begIdx);
hipLaunchKernelGGL(( treeBuild::write_newIdx) , dim3(nblock),dim3(nthread), 0, 0, nCells, d_value, d_key);
hipLaunchKernelGGL(( treeBuild::shuffle_cells), dim3(nblock),dim3(nthread), 0, 0, nCells, d_value, d_key, d_cellDataList_tmp, d_cellDataList);
/* group leaves */
d_leafList.realloc(nLeaves);
const int NTHREAD2 = 8;
const int NTHREAD = 256;
const int nblock1 = (nCells-1)/NTHREAD+1;
hipLaunchKernelGGL(( treeBuild::collect_leaves<NTHREAD2>), dim3(nblock1),dim3(NTHREAD), 0, 0, nCells, d_cellDataList, d_leafList);
kernelSuccess("shuffle");
const double t1 = rtc();
const double dt = t1 - t0;
fprintf(stderr, " shuffle done in %g sec : %g Mptcl/sec\n", dt, nPtcl/1e6/dt);
#if 0
int nnn;
CUDA_SAFE_CALL(hipMemcpyFromSymbol(&nnn, treeBuild::leafIdx_counter, sizeof(int)));
printf("nnn= %d nLeaves= %d\n", nnn , nLeaves);
assert(nnn == nLeaves);
std::vector<int> leaf(nLeaves);
d_leafList.d2h(&leaf[0]);
for (int i = 0; i < nLeaves; i++)
printf("leaf= %d : %d \n",i, leaf[i]);
#endif
}
#if 0 /* tree consistency */
{
std::vector<char> cells_storage(sizeof(CellData)*nCells);
CellData *cells = (CellData*)&cells_storage[0];
d_cellDataList.d2h(&cells[0], nCells);
int2 levels[32];
d_level_begIdx.d2h(levels);
std::vector<unsigned long long> keys(nPtcl);
for (int i= 1; i < 32; i++)
{
const int2 lv = levels[i];
if (lv.y == 0) break;
int jk = 0;
for (int j = lv.x; j <= lv.y; j++)
keys[jk++] = ((unsigned long long)cells[j].pbeg() << 32) | cells[j].pend();
// thrust::sort(&keys[0], &keys[jk]);
int np = 0;
for (int j = 0; j < jk ;j++)
{
const int pbeg = keys[j] >> 32;
const int pend = keys[j] & 0xFFFFFFFF;
np += pend-pbeg;
printf(" cell= %d: np= %d: pbeg= %d pend= %d \n", j, pend-pbeg, pbeg, pend);
}
printf("level= %d ncells= %d %d %d :: np= %d\n", i, lv.y-lv.x+1, lv.x, lv.y+1,np);
}
fflush(stdout);
assert(0);
}
#endif
#if 0 /* tree consistency */
{
std::vector<char> cells_storage(sizeof(CellData)*nCells);
CellData *cells = (CellData*)&cells_storage[0];
d_cellDataList.d2h(&cells[0], nCells);
int2 levels[32];
d_level_begIdx.d2h(levels);
std::vector<unsigned long long> keys(nPtcl);
std::vector<int> currLevel, nextLevel;
currLevel.reserve(nPtcl);
nextLevel.reserve(nPtcl);
for (int i = 0; i < 8; i++)
currLevel.push_back(i);
for (int i= 1; i < 32; i++)
{
const int2 lv = levels[i];
if (lv.y == 0) break;
int jk = 0;
for (int j = lv.x; j <= lv.y; j++)
keys[jk++] = ((unsigned long long)cells[j].pbeg() << 32) | cells[j].pend();
// thrust::sort(&keys[0], &keys[jk]);
int np = 0;
for (int j = 0; j < jk ;j++)
{
const int pbeg = keys[j] >> 32;
const int pend = keys[j] & 0xFFFFFFFF;
np += pend-pbeg;
printf(" cell= %d: np= %d: pbeg= %d pend= %d \n", j, pend-pbeg, pbeg, pend);
}
printf("level= %d ncells= %d %d %d :: np= %d\n", i, lv.y-lv.x+1, lv.x, lv.y+1,np);
}
fflush(stdout);
assert(0);
}
#endif
#if 0
{ /* print tree structure */
fprintf(stderr, " ncells= %d nLevels= %d nNodes= %d nLeaves= %d (%d) \n", nCells, nLevels, nNodes, nLeaves, nNodes+nLeaves);
#if 0
std::vector<char> cells_storage(sizeof(CellData)*nCells);
CellData *cells = (CellData*)&cells_storage[0];
d_cellDataList.d2h(&cells[0], nCells);
int cellL[33] = {0};
int np=0;
for (int i = 0; i < nCells; i++)
{
const CellData cell = cells[i];
assert(cell.level() >= 0);
assert(cell.level() < 32);
if (cell.isNode())
assert(cell.first() + cell.n() <= nCells);
else
np += cell.pend() - cell.pbeg();
}
fprintf(stderr, "np_leaf= %d\n", np);
int addr = 0;
int nlev = 0;
for (int i= 0; i < 32; i++)
{
nlev++;
printf("level= %d ncells= %d %d %d \n", i, cellL[i], addr, addr + cellL[i]);
addr += cellL[i];
if (cellL[i+1] == 0) break;
}
#endif
int2 levels[32];
d_level_begIdx.d2h(levels);
for (int i= 0; i < nLevels; i++)
{
const int2 lv = levels[i];
printf("level= %d ncells= %d %d %d \n", i, lv.y-lv.x+1, lv.x, lv.y+1);
}
#if 0
for (int i = 0; i < nCells; i++)
{
printf("cellIdx= %d isNode= %s: lev= %d first= %d n= %d pbeg= %d pend =%d\n",
i, cells[i].isNode() ? "true ":"false", cells[i].level(),
cells[i].first(), cells[i].n(), cells[i].pbeg(), cells[i].pend());
}
fflush(stdout);
assert(0);
#endif
}
#endif
}
#include "TreecodeInstances.h"
| 806950387e27bd9a09c2c35a5ce4399d48abe764.cu | #include "Treecode.h"
#define NWARPS_OCTREE2 3
#define NWARPS2 NWARPS_OCTREE2
#define NWARPS (1<<NWARPS2)
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "cuda_primitives.h"
namespace treeBuild
{
static __forceinline__ __device__ void computeGridAndBlockSize(dim3 &grid, dim3 &block, const int np)
{
const int NTHREADS = (1<<NWARPS_OCTREE2) * WARP_SIZE;
block = dim3(NTHREADS);
assert(np > 0);
grid = dim3(min(max(np/(NTHREADS*4),1), 512));
}
__device__ unsigned int retirementCount = 0;
__constant__ int d_node_max;
__constant__ int d_cell_max;
__device__ unsigned int nnodes = 0;
__device__ unsigned int nleaves = 0;
__device__ unsigned int nlevels = 0;
__device__ unsigned int nbodies_leaf = 0;
__device__ unsigned int ncells = 0;
__device__ int *memPool;
__device__ CellData *cellDataList;
__device__ void *ptclVel_tmp;
template<int NTHREAD2>
static __device__ float2 minmax_block(float2 sum)
{
extern __shared__ float shdata[];
float *shMin = shdata;
float *shMax = shdata + (1<<NTHREAD2);
const int tid = threadIdx.x;
shMin[tid] = sum.x;
shMax[tid] = sum.y;
__syncthreads();
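/* tree reduction in shared memory down to 64 partial results; the first warp then reduces those warp-synchronously below */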
#pragma unroll
for (int i = NTHREAD2-1; i >= 6; i--)
{
const int offset = 1 << i;
if (tid < offset)
{
shMin[tid] = sum.x = fminf(sum.x, shMin[tid + offset]);
shMax[tid] = sum.y = fmaxf(sum.y, shMax[tid + offset]);
}
__syncthreads();
}
if (tid < 32)
{
volatile float *vshMin = shMin;
volatile float *vshMax = shMax;
#pragma unroll
for (int i = 5; i >= 0; i--)
{
const int offset = 1 << i;
vshMin[tid] = sum.x = fminf(sum.x, vshMin[tid + offset]);
vshMax[tid] = sum.y = fmaxf(sum.y, vshMax[tid + offset]);
}
}
__syncthreads();
return sum;
}
template<const int NTHREAD2, typename T>
static __global__ void computeBoundingBox(
const int n,
__out Position<T> *minmax_ptr,
__out Box<T> *box_ptr,
const Particle4<T> *ptclPos)
{
const int NTHREAD = 1<<NTHREAD2;
const int NBLOCK = NTHREAD;
Position<T> bmin(T(+1e10)), bmax(T(-1e10));
const int nbeg = blockIdx.x * NTHREAD + threadIdx.x;
for (int i = nbeg; i < n; i += NBLOCK*NTHREAD)
if (i < n)
{
const Particle4<T> p = ptclPos[i];
const Position<T> pos(p.x(), p.y(), p.z());
bmin = Position<T>::min(bmin, pos);
bmax = Position<T>::max(bmax, pos);
}
float2 res;
res = minmax_block<NTHREAD2>(make_float2(bmin.x, bmax.x)); bmin.x = res.x; bmax.x = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.y, bmax.y)); bmin.y = res.x; bmax.y = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.z, bmax.z)); bmin.z = res.x; bmax.z = res.y;
if (threadIdx.x == 0)
{
minmax_ptr[blockIdx.x ] = bmin;
minmax_ptr[blockIdx.x + NBLOCK] = bmax;
}
__shared__ int lastBlock; /* with bool, doesn't compile in CUDA 6.0 */
__threadfence();
__syncthreads();
if (threadIdx.x == 0)
{
const int ticket = atomicInc(&retirementCount, NBLOCK);
lastBlock = (ticket == NBLOCK - 1);
}
__syncthreads();
#if 1
if (lastBlock)
{
bmin = minmax_ptr[threadIdx.x];
bmax = minmax_ptr[threadIdx.x + NBLOCK];
float2 res;
res = minmax_block<NTHREAD2>(make_float2(bmin.x, bmax.x)); bmin.x = res.x; bmax.x = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.y, bmax.y)); bmin.y = res.x; bmax.y = res.y;
res = minmax_block<NTHREAD2>(make_float2(bmin.z, bmax.z)); bmin.z = res.x; bmax.z = res.y;
__syncthreads();
if (threadIdx.x == 0)
{
#if 0
printf("bmin= %g %g %g \n", bmin.x, bmin.y, bmin.z);
printf("bmax= %g %g %g \n", bmax.x, bmax.y, bmax.z);
#endif
const Position<T> cvec((bmax.x+bmin.x)*T(0.5), (bmax.y+bmin.y)*T(0.5), (bmax.z+bmin.z)*T(0.5));
const Position<T> hvec((bmax.x-bmin.x)*T(0.5), (bmax.y-bmin.y)*T(0.5), (bmax.z-bmin.z)*T(0.5));
const T h = fmax(hvec.z, fmax(hvec.y, hvec.x));
T hsize = T(1.0);
while (hsize > h) hsize *= T(0.5);
while (hsize < h) hsize *= T(2.0);
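/* quantize the box centre onto a grid of spacing hsize/2^NMAXLEVEL (truncating toward zero) */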
const int NMAXLEVEL = 20;
const T hquant = hsize / T(1<<NMAXLEVEL);
const long long nx = (long long)(cvec.x/hquant);
const long long ny = (long long)(cvec.y/hquant);
const long long nz = (long long)(cvec.z/hquant);
const Position<T> centre(hquant * T(nx), hquant * T(ny), hquant * T(nz));
*box_ptr = Box<T>(centre, hsize);
retirementCount = 0;
}
}
#endif
}
/*******************/
template<int NLEAF, typename T, bool STOREIDX>
static __global__ void
__launch_bounds__( 256, 8)
buildOctant(
Box<T> box,
const int cellParentIndex,
const int cellIndexBase,
const int octantMask,
__out int *octCounterBase,
Particle4<T> *ptcl,
Particle4<T> *buff,
const int level = 0)
{
typedef typename vec<4,T>::type T4;
/* compute laneIdx & warpIdx for each of the threads:
* the thread block contains only 8 warps
* a warp is responsible for a single octant of the cell
*/
const int laneIdx = threadIdx.x & (WARP_SIZE-1);
const int warpIdx = threadIdx.x >> WARP_SIZE2;
/* We launch a 2D grid:
* the y-coordinate carries info about which parent cell to process
* the x-coordinate is just a standard approach for CUDA parallelism
*/
const int octant2process = (octantMask >> (3*blockIdx.y)) & 0x7;
/* get the pointer to the atomic data for a given octant */
int *octCounter = octCounterBase + blockIdx.y*(8+8+8+64+8);
/* read data about the current cell */
const int data = octCounter[laneIdx];
const int nBeg = __shfl(data, 1, WARP_SIZE);
const int nEnd = __shfl(data, 2, WARP_SIZE);
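/* lanes 1 and 2 of the counter block hold the [begin, end) particle range of this cell */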
/* if we are not at the root level, compute the geometric box
* of the cell */
if (!STOREIDX)
box = ChildBox(box, octant2process);
/* counts the number of particles in each suboctant of each child octant */
__shared__ int nShChildrenFine[NWARPS][9][8];
__shared__ int nShChildren[8][8];
Box<T> *shChildBox = (Box<T>*)&nShChildren[0][0];
int *shdata = (int*)&nShChildrenFine[0][0][0];
#pragma unroll
for (int i = 0; i < 8*9*NWARPS; i += NWARPS*WARP_SIZE)
if (i + threadIdx.x < 8*9*NWARPS)
shdata[i + threadIdx.x] = 0;
if (laneIdx == 0 && warpIdx < 8)
shChildBox[warpIdx] = ChildBox(box, warpIdx);
__syncthreads();
/* process particle array */
const int nBeg_block = nBeg + blockIdx.x * blockDim.x;
for (int i = nBeg_block; i < nEnd; i += gridDim.x * blockDim.x)
{
Particle4<T> p4 = ptcl[min(i+threadIdx.x, nEnd-1)];
int p4octant = p4.get_oct();
if (STOREIDX)
{
p4.set_idx(i + threadIdx.x);
p4octant = Octant(box.centre, Position<T>(p4.x(), p4.y(), p4.z()));
}
p4octant = i+threadIdx.x < nEnd ? p4octant : 0xF;
/* compute suboctant of the octant into which particle will fall */
if (p4octant < 8)
{
const int p4subOctant = Octant(shChildBox[p4octant].centre, Position<T>(p4.x(), p4.y(), p4.z()));
p4.set_oct(p4subOctant);
}
/* compute the number of particles in each of the octants that will be processed by the thread block */
int np = 0;
#pragma unroll
for (int octant = 0; octant < 8; octant++)
{
const int sum = warpBinReduce(p4octant == octant);
if (octant == laneIdx)
np = sum;
}
/* increment the per-octant atomic counters in a single instruction so that all thread blocks can participate */
int addrB0;
if (laneIdx < 8)
addrB0 = atomicAdd(&octCounter[8+8+laneIdx], np);
/* compute addresses where to write data */
int cntr = 32;
int addrW = -1;
#pragma unroll
for (int octant = 0; octant < 8; octant++)
{
const int sum = warpBinReduce(p4octant == octant);
if (sum > 0)
{
const int offset = warpBinExclusiveScan1(p4octant == octant);
const int addrB = __shfl(addrB0, octant, WARP_SIZE);
if (p4octant == octant)
addrW = addrB + offset;
cntr -= sum;
if (cntr == 0) break;
}
}
/* write the data in a single instruction */
if (addrW >= 0)
buff[addrW] = p4;
/* count how many particles fall in each suboctant of each octant */
cntr = 32;
#pragma unroll
for (int octant = 0; octant < 8; octant++)
{
if (cntr == 0) break;
const int sum = warpBinReduce(p4octant == octant);
if (sum > 0)
{
const int subOctant = p4octant == octant ? p4.get_oct() : -1;
#pragma unroll
for (int k = 0; k < 8; k += 4)
{
const int4 sum = make_int4(
warpBinReduce(k+0 == subOctant),
warpBinReduce(k+1 == subOctant),
warpBinReduce(k+2 == subOctant),
warpBinReduce(k+3 == subOctant));
if (laneIdx == 0)
{
int4 value = *(int4*)&nShChildrenFine[warpIdx][octant][k];
value.x += sum.x;
value.y += sum.y;
value.z += sum.z;
value.w += sum.w;
*(int4*)&nShChildrenFine[warpIdx][octant][k] = value;
}
}
cntr -= sum;
}
}
}
__syncthreads();
if (warpIdx >= 8) return;
#pragma unroll
for (int k = 0; k < 8; k += 4)
{
int4 nSubOctant = laneIdx < NWARPS ? (*(int4*)&nShChildrenFine[laneIdx][warpIdx][k]) : make_int4(0,0,0,0);
#pragma unroll
for (int i = NWARPS2-1; i >= 0; i--)
{
nSubOctant.x += __shfl_xor(nSubOctant.x, 1<<i, NWARPS);
nSubOctant.y += __shfl_xor(nSubOctant.y, 1<<i, NWARPS);
nSubOctant.z += __shfl_xor(nSubOctant.z, 1<<i, NWARPS);
nSubOctant.w += __shfl_xor(nSubOctant.w, 1<<i, NWARPS);
}
if (laneIdx == 0)
*(int4*)&nShChildren[warpIdx][k] = nSubOctant;
}
__syncthreads();
if (laneIdx < 8)
if (nShChildren[warpIdx][laneIdx] > 0)
atomicAdd(&octCounter[8+16+warpIdx*8 + laneIdx], nShChildren[warpIdx][laneIdx]);
__syncthreads(); /* must be present, otherwise race conditions occur between parent & children */
/* detect last thread block for unique y-coordinate of the grid:
* mind, this cannot be done on the host, because we do not detect the last
* block of the whole grid, but the last x-block for each y-coordinate of the grid;
* this should increase the degree of parallelism
*/
int *shmem = &nShChildren[0][0];
if (warpIdx == 0)
shmem[laneIdx] = 0;
int &lastBlock = shmem[0];
if (threadIdx.x == 0)
{
const int ticket = atomicAdd(octCounter, 1);
lastBlock = (ticket == gridDim.x-1);
}
__syncthreads();
if (!lastBlock) return;
__syncthreads();
/* okay, we are in the last thread block, do the analysis and decide what to do next */
if (warpIdx == 0)
shmem[laneIdx] = 0;
if (threadIdx.x == 0)
atomicCAS(&nlevels, level, level+1);
__syncthreads();
/* compute the beginning and end addresses of the sorted particles in the child cell */
const int nCell = __shfl(data, 8+warpIdx, WARP_SIZE);
const int nEnd1 = octCounter[8+8+warpIdx];
const int nBeg1 = nEnd1 - nCell;
if (laneIdx == 0)
shmem[warpIdx] = nCell;
__syncthreads();
const int npCell = laneIdx < 8 ? shmem[laneIdx] : 0;
/* compute the number of children that need to be further split, and compute their offsets */
const int2 nSubNodes = warpBinExclusiveScan(npCell > NLEAF);
const int2 nLeaves = warpBinExclusiveScan(npCell > 0 && npCell <= NLEAF);
if (warpIdx == 0 && laneIdx < 8)
{
shmem[8 +laneIdx] = nSubNodes.x;
shmem[16+laneIdx] = nLeaves.x;
}
int nCellmax = npCell;
#pragma unroll
for (int i = 2; i >= 0; i--)
nCellmax = max(nCellmax, __shfl_xor(nCellmax, 1<<i, WARP_SIZE));
/* if there is at least one cell to split, increment the number of nodes */
if (threadIdx.x == 0 && nSubNodes.y > 0)
{
shmem[16+8] = atomicAdd(&nnodes,nSubNodes.y);
#if 1 /* temp solution, a better one is to use RingBuffer */
assert(shmem[16+8] < d_node_max);
#endif
}
/* writing linking info, parent, child and particle's list */
const int nChildrenCell = warpBinReduce(npCell > 0);
if (threadIdx.x == 0 && nChildrenCell > 0)
{
const int cellFirstChildIndex = atomicAdd(&ncells, nChildrenCell);
#if 1
assert(cellFirstChildIndex + nChildrenCell < d_cell_max);
#endif
/*** keep in mind, the 0-level will be overwritten ***/
assert(nChildrenCell > 0);
assert(nChildrenCell <= 8);
const CellData cellData(level,cellParentIndex, nBeg, nEnd, cellFirstChildIndex, nChildrenCell-1);
assert(cellData.first() < ncells);
assert(cellData.isNode());
cellDataList[cellIndexBase + blockIdx.y] = cellData;
shmem[16+9] = cellFirstChildIndex;
}
__syncthreads();
const int cellFirstChildIndex = shmem[16+9];
/* compute atomic data offset for cells that need to be split */
const int next_node = shmem[16+8];
int *octCounterNbase = &memPool[next_node*(8+8+8+64+8)];
const int nodeOffset = shmem[8 +warpIdx];
const int leafOffset = shmem[16+warpIdx];
/* if the cell needs to be split, populate its shared atomic data */
if (nCell > NLEAF)
{
int *octCounterN = octCounterNbase + nodeOffset*(8+8+8+64+8);
/* number of particles in each cell's subcells */
const int nSubCell = laneIdx < 8 ? octCounter[8+16+warpIdx*8 + laneIdx] : 0;
/* compute offsets */
int cellOffset = nSubCell;
#pragma unroll
for(int i = 0; i < 3; i++) /* log2(8) steps */
cellOffset = shfl_scan_add_step(cellOffset, 1 << i);
cellOffset -= nSubCell;
/* store offset in memory */
cellOffset = __shfl_up(cellOffset, 8, WARP_SIZE);
if (laneIdx < 8) cellOffset = nSubCell;
else cellOffset += nBeg1;
cellOffset = __shfl_up(cellOffset, 8, WARP_SIZE);
if (laneIdx < 8) cellOffset = 0;
if (laneIdx == 1) cellOffset = nBeg1;
if (laneIdx == 2) cellOffset = nEnd1;
if (laneIdx < 24)
octCounterN[laneIdx] = cellOffset;
}
/***************************/
/* launch child kernel */
/***************************/
/* warps cooperate so that only 1 kernel needs to be launched by a thread block
* with a larger degree of parallelism */
if (nSubNodes.y > 0 && warpIdx == 0)
{
/* build octant mask */
int octant_mask = npCell > NLEAF ? (laneIdx << (3*nSubNodes.x)) : 0;
#pragma unroll
for (int i = 4; i >= 0; i--)
octant_mask |= __shfl_xor(octant_mask, 1<<i, WARP_SIZE);
if (threadIdx.x == 0)
{
dim3 grid, block;
computeGridAndBlockSize(grid, block, nCellmax);
#if 0
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
grid.y = nSubNodes.y; /* each y-coordinate of the grid will be busy for each parent cell */
atomicAdd(&n_scheduled,1);
atomicAdd(&n_in_que, 1);
atomicMax(&n_in_que_max, n_in_que);
#if defined(FASTMODE) && NWARPS==8
if (nCellmax <= block.x)
{
grid.x = 1;
buildOctantSingle<NLEAF,T><<<grid,block,0,stream>>>
(box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
}
else
#endif
buildOctant<NLEAF,T,false><<<grid,block,0,stream>>>
(box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
#else
grid.y = nSubNodes.y; /* each y-coordinate of the grid will be busy for each parent cell */
#if defined(FASTMODE) && NWARPS==8
if (nCellmax <= block.x)
{
grid.x = 1;
buildOctantSingle<NLEAF,T><<<grid,block>>>
(box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
}
else
#endif
buildOctant<NLEAF,T,false><<<grid,block>>>
(box, cellIndexBase+blockIdx.y, cellFirstChildIndex,
octant_mask, octCounterNbase, buff, ptcl, level+1);
#endif
const cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf(" launch failed 1: %s level= %d \n", cudaGetErrorString(err), level);
assert(0);
}
}
}
/******************/
/* process leaves */
/******************/
if (nCell <= NLEAF && nCell > 0)
{
if (laneIdx == 0)
{
atomicAdd(&nleaves,1);
atomicAdd(&nbodies_leaf, nEnd1-nBeg1);
const CellData leafData(level+1, cellIndexBase+blockIdx.y, nBeg1, nEnd1);
assert(!leafData.isNode());
cellDataList[cellFirstChildIndex + nSubNodes.y + leafOffset] = leafData;
}
if (!(level&1))
{
for (int i = nBeg1+laneIdx; i < nEnd1; i += WARP_SIZE)
if (i < nEnd1)
{
Particle4<T> pos = buff[i];
Particle4<T> vel = ((Particle4<T>*)ptclVel_tmp)[pos.get_idx()];
#ifdef PSHFL_SANITY_CHECK
pos.mass() = T(pos.get_idx());
#else
pos.mass() = vel.mass();
#endif
ptcl[i] = pos;
buff[i] = vel;
}
}
else
{
for (int i = nBeg1+laneIdx; i < nEnd1; i += WARP_SIZE)
if (i < nEnd1)
{
Particle4<T> pos = buff[i];
Particle4<T> vel = ((Particle4<T>*)ptclVel_tmp)[pos.get_idx()];
#ifdef PSHFL_SANITY_CHECK
pos.mass() = T(pos.get_idx());
#else
pos.mass() = vel.mass();
#endif
buff[i] = pos;
ptcl[i] = vel;
}
}
}
}
template<typename T>
static __global__ void countAtRootNode(
const int n,
__out int *octCounter,
const Box<T> box,
const Particle4<T> *ptclPos)
{
int np_octant[8] = {0};
const int beg = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = beg; i < n; i += gridDim.x * blockDim.x)
if (i < n)
{
const Particle4<T> p = ptclPos[i];
const Position<T> pos(p.x(), p.y(), p.z());
const int octant = Octant(box.centre, pos);
np_octant[0] += (octant == 0);
np_octant[1] += (octant == 1);
np_octant[2] += (octant == 2);
np_octant[3] += (octant == 3);
np_octant[4] += (octant == 4);
np_octant[5] += (octant == 5);
np_octant[6] += (octant == 6);
np_octant[7] += (octant == 7);
};
const int laneIdx = threadIdx.x & (WARP_SIZE-1);
#pragma unroll
for (int k = 0; k < 8; k++)
{
int np = np_octant[k];
#pragma unroll
for (int i = 4; i >= 0; i--)
np += __shfl_xor(np, 1<<i, WARP_SIZE);
if (laneIdx == 0)
atomicAdd(&octCounter[8+k],np);
}
}
template<int NLEAF, typename T>
static __global__ void buildOctree(
const int n,
const Box<T> *domain,
CellData *d_cellDataList,
int *stack_memory_pool,
Particle4<T> *ptcl,
Particle4<T> *buff,
Particle4<T> *d_ptclVel,
int *ncells_return = NULL)
{
cellDataList = d_cellDataList;
ptclVel_tmp = (void*)d_ptclVel;
memPool = stack_memory_pool;
#if 0
printf("n= %d\n", n);
printf("d_node_max= %d\n", d_node_max);
printf("d_cell_max= %d\n", d_cell_max);
printf("GPU: box_centre= %g %g %g hsize= %g\n",
domain->centre.x,
domain->centre.y,
domain->centre.z,
domain->hsize);
#endif
int *octCounter = new int[8+8];
for (int k = 0; k < 16; k++)
octCounter[k] = 0;
countAtRootNode<T><<<256, 256>>>(n, octCounter, *domain, ptcl);
assert(cudaGetLastError() == cudaSuccess);
cudaDeviceSynchronize();
#if 1
int total = 0;
for (int k = 8; k < 16; k++)
{
printf("octCounter[%d]= %d\n", k-8, octCounter[k]);
total += octCounter[k];
}
printf("total= %d n= %d\n", total, n);
#endif
int *octCounterN = new int[8+8+8+64+8];
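/* counter-block layout (8+8+8+64+8 ints per cell):
[0] block-completion ticket, [1] = pbeg, [2] = pend,
[8..15] per-octant particle counts,
[16..23] scatter cursors (initialised to the exclusive prefix sums of the counts),
[24..87] per-suboctant particle counts (8x8); the remaining slots appear unused */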
#pragma unroll
for (int k = 0; k < 8; k++)
{
octCounterN[ k] = 0;
octCounterN[8+ k] = octCounter[8+k ];
octCounterN[8+8 +k] = k == 0 ? 0 : octCounterN[8+8+k-1] + octCounterN[8+k-1];
octCounterN[8+16+k] = 0;
}
#pragma unroll
for (int k = 8; k < 64; k++)
octCounterN[8+16+k] = 0;
#if 0
for (int k = 0; k < 8; k++)
printf("k= %d n = %d offset= %d \n",
k, octCounterN[8+k], octCounterN[8+8+k]);
#endif
#ifdef IOCOUNT
io_words = 0;
#endif
nnodes = 0;
nleaves = 0;
nlevels = 0;
ncells = 0;
nbodies_leaf = 0;
octCounterN[1] = 0;
octCounterN[2] = n;
dim3 grid, block;
computeGridAndBlockSize(grid, block, n);
#if 1
buildOctant<NLEAF,T,true><<<grid, block>>>
(*domain, 0, 0, 0, octCounterN, ptcl, buff);
assert(cudaDeviceSynchronize() == cudaSuccess);
#endif
printf(" nptcl = %d\n", n);
printf(" nb_leaf= %d\n", nbodies_leaf);
printf(" nnodes = %d\n", nnodes);
printf(" nleaves= %d\n", nleaves);
printf(" ncells= %d\n", ncells);
printf(" nlevels= %d\n", nlevels);
if (ncells_return != NULL)
*ncells_return = ncells;
#ifdef IOCOUNT
printf(" io= %g MB \n" ,io_words*4.0/1024.0/1024.0);
#endif
delete [] octCounter;
delete [] octCounterN;
}
static __global__ void
get_cell_levels(const int n, const CellData cellList[], CellData cellListOut[], int key[], int value[])
{
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n) return;
const CellData cell = cellList[idx];
key [idx] = cell.level();
value[idx] = idx;
cellListOut[idx] = cell;
}
static __global__ void
write_newIdx(const int n, const int value[], int moved_to_idx[])
{
const int newIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (newIdx >= n) return;
const int oldIdx = value[newIdx];
moved_to_idx[oldIdx] = newIdx;
}
static __global__ void
compute_level_begIdx(const int n, const int levels[], int2 level_begendIdx[])
{
const int gidx = blockIdx.x*blockDim.x + threadIdx.x;
if (gidx >= n) return;
extern __shared__ int shLevels[];
const int tid = threadIdx.x;
shLevels[tid+1] = levels[gidx];
int shIdx = 0;
int gmIdx = max(blockIdx.x*blockDim.x-1,0);
if (tid == 1)
{
shIdx = blockDim.x+1;
gmIdx = min(blockIdx.x*blockDim.x + blockDim.x,n-1);
}
if (tid < 2)
shLevels[shIdx] = levels[gmIdx];
__syncthreads();
const int idx = tid+1;
const int currLevel = shLevels[idx];
const int prevLevel = shLevels[idx-1];
if (currLevel != prevLevel || gidx == 0)
level_begendIdx[currLevel].x = gidx;
const int nextLevel = shLevels[idx+1];
if (currLevel != nextLevel || gidx == n-1)
level_begendIdx[currLevel].y = gidx;
}
__device__ unsigned int leafIdx_counter = 0;
static __global__ void
shuffle_cells(const int n, const int value[], const int moved_to_idx[], const CellData cellListIn[], CellData cellListOut[])
{
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n) return;
const int mapIdx = value[idx];
CellData cell = cellListIn[mapIdx];
if (cell.isNode())
{
const int firstOld = cell.first();
const int firstNew = moved_to_idx[firstOld];
cell.update_first(firstNew);
}
if (cell.parent() > 0)
cell.update_parent(moved_to_idx[cell.parent()]);
cellListOut[idx] = cell;
if (threadIdx.x == 0 && blockIdx.x == 0)
leafIdx_counter = 0;
}
template<int NTHREAD2>
static __global__
void collect_leaves(const int n, const CellData *cellList, int *leafList)
{
const int gidx = blockDim.x*blockIdx.x + threadIdx.x;
const CellData cell = cellList[min(gidx,n-1)];
__shared__ int shdata[1<<NTHREAD2];
int value = gidx < n && cell.isLeaf();
shdata[threadIdx.x] = value;
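/* block-wide inclusive scan (Hillis-Steele): after the loop, shdata[tid] holds the number of leaves found by threads 0..tid within this block */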
#pragma unroll
for (int offset2 = 0; offset2 < NTHREAD2; offset2++)
{
const int offset = 1 << offset2;
__syncthreads();
if (threadIdx.x >= offset)
value += shdata[threadIdx.x - offset];
__syncthreads();
shdata[threadIdx.x] = value;
}
const int nwrite = shdata[threadIdx.x];
const int scatter = nwrite - (gidx < n && cell.isLeaf());
__syncthreads();
if (threadIdx.x == blockDim.x-1 && nwrite > 0)
shdata[0] = atomicAdd(&leafIdx_counter, nwrite);
__syncthreads();
if (cell.isLeaf())
leafList[shdata[0] + scatter] = gidx;
}
}
template<typename real_t>
void Treecode<real_t>::buildTree(const int nLeaf)
{
this->nLeaf = nLeaf;
assert(nLeaf == 16 || nLeaf == 24 || nLeaf == 32 || nLeaf == 48 || nLeaf == 64);
/* compute bounding box */
{
const int NTHREAD2 = 8;
const int NTHREAD = 1<<NTHREAD2;
const int NBLOCK = NTHREAD;
assert(2*NBLOCK <= 2048); /* see Treecode constructor for d_minmax allocation */
cudaDeviceSynchronize();
kernelSuccess("cudaDomainSize0");
const double t0 = rtc();
treeBuild::computeBoundingBox<NTHREAD2,real_t><<<NBLOCK,NTHREAD,NTHREAD*sizeof(float2)>>>
(nPtcl, d_minmax, d_domain, d_ptclPos);
kernelSuccess("cudaDomainSize");
const double dt = rtc() - t0;
fprintf(stderr, " cudaDomainSize done in %g sec : %g Mptcl/sec\n", dt, nPtcl/1e6/dt);
}
/*** build tree ***/
CUDA_SAFE_CALL(cudaMemcpyToSymbol(treeBuild::d_node_max, &node_max, sizeof(int), 0, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(treeBuild::d_cell_max, &cell_max, sizeof(int), 0, cudaMemcpyHostToDevice));
// cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount,16384);
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<16,real_t,true>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<16,real_t,false>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<24,real_t,true>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<24,real_t,false>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<32,real_t,true>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<32,real_t,false>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<48,real_t,true>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<48,real_t,false>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<64,real_t,true>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(&treeBuild::buildOctant<64,real_t,false>, cudaFuncCachePreferShared));
CUDA_SAFE_CALL(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte));
{
CUDA_SAFE_CALL(cudaMemset(d_stack_memory_pool,0,stack_size*sizeof(int)));
cudaDeviceSynchronize();
const double t0 = rtc();
switch(nLeaf)
{
case 16:
treeBuild::buildOctree<16,real_t><<<1,1>>>(
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 24:
treeBuild::buildOctree<24,real_t><<<1,1>>>(
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 32:
treeBuild::buildOctree<32,real_t><<<1,1>>>(
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 48:
treeBuild::buildOctree<48,real_t><<<1,1>>>(
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
case 64:
treeBuild::buildOctree<64,real_t><<<1,1>>>(
nPtcl, d_domain, d_cellDataList, d_stack_memory_pool, d_ptclPos, d_ptclPos_tmp, d_ptclVel);
break;
default:
assert(0);
}
kernelSuccess("buildOctree");
const double t1 = rtc();
const double dt = t1 - t0;
CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&nLevels, treeBuild::nlevels, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&nCells, treeBuild::ncells, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&nNodes, treeBuild::nnodes, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&nLeaves, treeBuild::nleaves, sizeof(int)));
fprintf(stderr, " buildOctree done in %g sec : %g Mptcl/sec\n", dt, nPtcl/1e6/dt);
std::swap(d_ptclPos_tmp.ptr, d_ptclVel.ptr);
}
/* sort nodes by level */
{
cudaDeviceSynchronize();
const double t0 = rtc();
const int nthread = 256;
const int nblock = (nCells-1)/nthread + 1;
treeBuild::get_cell_levels<<<nblock,nthread>>>(nCells, d_cellDataList, d_cellDataList_tmp, d_key, d_value);
thrust::device_ptr<int> keys_beg(d_key.ptr);
thrust::device_ptr<int> keys_end(d_key.ptr + nCells);
thrust::device_ptr<int> vals_beg(d_value.ptr);
thrust::stable_sort_by_key(keys_beg, keys_end, vals_beg);
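/* after the stable sort, cells of the same level are contiguous and d_value maps each new (sorted) position back to the old cell index */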
/* compute beginning & end of each level */
treeBuild::compute_level_begIdx<<<nblock,nthread,(nthread+2)*sizeof(int)>>>(nCells, d_key, d_level_begIdx);
treeBuild::write_newIdx <<<nblock,nthread>>>(nCells, d_value, d_key);
treeBuild::shuffle_cells<<<nblock,nthread>>>(nCells, d_value, d_key, d_cellDataList_tmp, d_cellDataList);
/* group leaves */
d_leafList.realloc(nLeaves);
const int NTHREAD2 = 8;
const int NTHREAD = 256;
const int nblock1 = (nCells-1)/NTHREAD+1;
treeBuild::collect_leaves<NTHREAD2><<<nblock1,NTHREAD>>>(nCells, d_cellDataList, d_leafList);
kernelSuccess("shuffle");
const double t1 = rtc();
const double dt = t1 - t0;
fprintf(stderr, " shuffle done in %g sec : %g Mptcl/sec\n", dt, nPtcl/1e6/dt);
#if 0
int nnn;
CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&nnn, treeBuild::leafIdx_counter, sizeof(int)));
printf("nnn= %d nLeaves= %d\n", nnn , nLeaves);
assert(nnn == nLeaves);
std::vector<int> leaf(nLeaves);
d_leafList.d2h(&leaf[0]);
for (int i = 0; i < nLeaves; i++)
printf("leaf= %d : %d \n",i, leaf[i]);
#endif
}
#if 0 /* tree consistency */
{
std::vector<char> cells_storage(sizeof(CellData)*nCells);
CellData *cells = (CellData*)&cells_storage[0];
d_cellDataList.d2h(&cells[0], nCells);
int2 levels[32];
d_level_begIdx.d2h(levels);
std::vector<unsigned long long> keys(nPtcl);
for (int i= 1; i < 32; i++)
{
const int2 lv = levels[i];
if (lv.y == 0) break;
int jk = 0;
for (int j = lv.x; j <= lv.y; j++)
keys[jk++] = ((unsigned long long)cells[j].pbeg() << 32) | cells[j].pend();
// thrust::sort(&keys[0], &keys[jk]);
int np = 0;
for (int j = 0; j < jk ;j++)
{
const int pbeg = keys[j] >> 32;
const int pend = keys[j] & 0xFFFFFFFF;
np += pend-pbeg;
printf(" cell= %d: np= %d: pbeg= %d pend= %d \n", j, pend-pbeg, pbeg, pend);
}
printf("level= %d ncells= %d %d %d :: np= %d\n", i, lv.y-lv.x+1, lv.x, lv.y+1,np);
}
fflush(stdout);
assert(0);
}
#endif
#if 0 /* tree consistency */
{
std::vector<char> cells_storage(sizeof(CellData)*nCells);
CellData *cells = (CellData*)&cells_storage[0];
d_cellDataList.d2h(&cells[0], nCells);
int2 levels[32];
d_level_begIdx.d2h(levels);
std::vector<unsigned long long> keys(nPtcl);
std::vector<int> currLevel, nextLevel;
currLevel.reserve(nPtcl);
nextLevel.reserve(nPtcl);
for (int i = 0; i < 8; i++)
currLevel.push_back(i);
for (int i= 1; i < 32; i++)
{
const int2 lv = levels[i];
if (lv.y == 0) break;
int jk = 0;
for (int j = lv.x; j <= lv.y; j++)
keys[jk++] = ((unsigned long long)cells[j].pbeg() << 32) | cells[j].pend();
// thrust::sort(&keys[0], &keys[jk]);
int np = 0;
for (int j = 0; j < jk ;j++)
{
const int pbeg = keys[j] >> 32;
const int pend = keys[j] & 0xFFFFFFFF;
np += pend-pbeg;
printf(" cell= %d: np= %d: pbeg= %d pend= %d \n", j, pend-pbeg, pbeg, pend);
}
printf("level= %d ncells= %d %d %d :: np= %d\n", i, lv.y-lv.x+1, lv.x, lv.y+1,np);
}
fflush(stdout);
assert(0);
}
#endif
#if 0
{ /* print tree structure */
fprintf(stderr, " ncells= %d nLevels= %d nNodes= %d nLeaves= %d (%d) \n", nCells, nLevels, nNodes, nLeaves, nNodes+nLeaves);
#if 0
std::vector<char> cells_storage(sizeof(CellData)*nCells);
CellData *cells = (CellData*)&cells_storage[0];
d_cellDataList.d2h(&cells[0], nCells);
int cellL[33] = {0};
int np=0;
for (int i = 0; i < nCells; i++)
{
const CellData cell = cells[i];
assert(cell.level() >= 0);
assert(cell.level() < 32);
if (cell.isNode())
assert(cell.first() + cell.n() <= nCells);
else
np += cell.pend() - cell.pbeg();
}
fprintf(stderr, "np_leaf= %d\n", np);
int addr = 0;
int nlev = 0;
for (int i= 0; i < 32; i++)
{
nlev++;
printf("level= %d ncells= %d %d %d \n", i, cellL[i], addr, addr + cellL[i]);
addr += cellL[i];
if (cellL[i+1] == 0) break;
}
#endif
int2 levels[32];
d_level_begIdx.d2h(levels);
for (int i= 0; i < nLevels; i++)
{
const int2 lv = levels[i];
printf("level= %d ncells= %d %d %d \n", i, lv.y-lv.x+1, lv.x, lv.y+1);
}
#if 0
for (int i = 0; i < nCells; i++)
{
printf("cellIdx= %d isNode= %s: lev= %d first= %d n= %d pbeg= %d pend =%d\n",
i, cells[i].isNode() ? "true ":"false", cells[i].level(),
cells[i].first(), cells[i].n(), cells[i].pbeg(), cells[i].pend());
}
fflush(stdout);
assert(0);
#endif
}
#endif
}
#include "TreecodeInstances.h"
|
d5d1f2c34ec82e21e64599c9d93fddb8f57902c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
#include <chrono>
#include "device_launch_parameters.h"
#include "functionImage.h"
using namespace std::chrono;
using namespace std;
/*
A common strategy is to partition the data into subsets
called "tiles" so that each tile fits into the shared
memory
*/
#define TILE_WIDTH 16 // 256 threads
#define maskCols 5
#define maskRows 5
#define BLOCK_WIDTH (TILE_WIDTH + maskCols -1)
#define clamp(x) (min(max((x), 0.0), 1.0))
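/* with TILE_WIDTH = 16 and a 5x5 mask, BLOCK_WIDTH = 16+5-1 = 20:
each channel tile needs 20*20 = 400 floats (1600 bytes) of shared memory,
while a block has only 16*16 = 256 threads, hence the two loading passes inside the kernel below */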
__global__ void SharedMemoryConvolution(float * InputImageData, const float *__restrict__ kernel,
float* outputImageData, int channels, int width, int height){
__shared__ float N_ds[BLOCK_WIDTH][BLOCK_WIDTH]; //block of image in shared memory
// allocation in shared memory of image blocks
int maskRadius = maskRows/2;
for (int k = 0; k <channels; k++) {
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x; // flatten the 2D coordinates of the generic thread
int destY = dest/BLOCK_WIDTH; //row in shared memory (inverse operation: recover the 2D coordinates from the flattened index)
int destX = dest%BLOCK_WIDTH; //col in shared memory (of the generic thread with respect to the shared memory area)
int srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius; // index to fetch data from input image
int srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius; // index to fetch data from input image
int src = (srcY *width +srcX) * channels + k; // index of input image
// When a thread is to load any input element, test if it is in the valid index range
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src]; // copy element of image in shared memory
else
N_ds[destY][destX] = 0;
dest = threadIdx.y * TILE_WIDTH+ threadIdx.x + (TILE_WIDTH * TILE_WIDTH);
destY = dest/BLOCK_WIDTH;
destX = dest%BLOCK_WIDTH;
srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius;
srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius;
src = (srcY *width +srcX) * channels + k;
if(destY < BLOCK_WIDTH){
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src];
else
N_ds[destY][destX] = 0;
}
//Barrier synchronization
__syncthreads();
//compute kernel convolution
float Pvalue = 0;
int y, x;
for (y= 0; y < maskCols; y++)
for(x = 0; x<maskRows; x++)
Pvalue += N_ds[threadIdx.y + y][threadIdx.x + x] *kernel[y * maskCols + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if(y < height && x < width)
outputImageData[(y * width + x) * channels + k] = Pvalue;
__syncthreads();
}
}
void imageConvolutionSharedMemory(const char* inputfilepath, const char* outputfilepath ){
int imgChannels;
int imgHeight;
int imgWidth;
Image* imgInput;
Image* imgOutput;
float* hostInputImageData;
float* hostOutputImageData;
float* deviceInputImageData;
float* deviceOutputImageData;
float* deviceMaskData;
float hostMaskData[maskRows * maskCols]={
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06
};
imgInput = import_PPM(inputfilepath);
imgWidth = img_getWidth(imgInput);
imgHeight = img_getHeight(imgInput);
imgChannels = img_getChannels(imgInput);
imgOutput = Image_new(imgWidth, imgHeight, imgChannels);
hostInputImageData = img_getData(imgInput);
hostOutputImageData = img_getData(imgOutput);
hipDeviceReset();
hipMalloc((void **) &deviceInputImageData, imgWidth * imgHeight *
imgChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, imgWidth * imgHeight *
imgChannels * sizeof(float));
hipMalloc((void **) &deviceMaskData, maskRows * maskCols
* sizeof(float));
hipMemcpy(deviceInputImageData, hostInputImageData,
imgWidth * imgHeight * imgChannels * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, hostMaskData,
maskRows * maskCols * sizeof(float),
hipMemcpyHostToDevice);
dim3 dimGrid(ceil((float) imgWidth/TILE_WIDTH),
ceil((float) imgHeight/TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
cout << "CONVOLUTION SHARED MEMORY" << endl;
cout << "Image dimensions : " << imgWidth << "x" << imgHeight << " , Channels : " << imgChannels << endl;
cout << "Time: ";
high_resolution_clock::time_point start= high_resolution_clock::now();
hipLaunchKernelGGL(( SharedMemoryConvolution), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceInputImageData, deviceMaskData, deviceOutputImageData,
imgChannels, imgWidth, imgHeight);
hipDeviceSynchronize(); // kernel launches are asynchronous: wait for completion so the timer measures execution, not just the launch
high_resolution_clock::time_point end= high_resolution_clock::now();
chrono::duration<double> duration = end - start;
cout << duration.count()*1000 << " millisec" <<endl;
cout << "----------------------------------" << endl;
hipMemcpy(hostOutputImageData, deviceOutputImageData, imgWidth * imgHeight *
imgChannels * sizeof(float), hipMemcpyDeviceToHost);
write_image(outputfilepath, imgOutput);
hipMemset(deviceInputImageData,0,imgWidth * imgHeight *
imgChannels * sizeof(float));
hipMemset(deviceOutputImageData,0,imgWidth * imgHeight *
imgChannels * sizeof(float));
hipMemset(deviceMaskData,0,maskRows * maskCols
* sizeof(float));
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
Image_delete(imgOutput);
Image_delete(imgInput);
}
int main(){
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photoSD.ppm","/home/aventuri/progetto/sharedmemory/resultSDSM.ppm");
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photoHD1.ppm","/home/aventuri/progetto/sharedmemory/resultHD1SM.ppm");
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photoHD2.ppm","/home/aventuri/progetto/sharedmemory/resultHD2SM.ppm");
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photo4K.ppm","/home/aventuri/progetto/sharedmemory/result4KSM.ppm");
} | d5d1f2c34ec82e21e64599c9d93fddb8f57902c2.cu | #include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <iostream>
#include <cuda_runtime.h>
#include <time.h>
#include <chrono>
#include "device_launch_parameters.h"
#include "functionImage.h"
using namespace std::chrono;
using namespace std;
/*
A common strategy is to partition the data into subsets
called "tiles" so that each tile fits into the shared
memory
*/
#define TILE_WIDTH 16 // 256 threads
#define maskCols 5
#define maskRows 5
#define BLOCK_WIDTH (TILE_WIDTH + maskCols -1)
#define clamp(x) (min(max((x), 0.0), 1.0))
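/* with TILE_WIDTH = 16 and a 5x5 mask, BLOCK_WIDTH = 16+5-1 = 20:
each channel tile needs 20*20 = 400 floats (1600 bytes) of shared memory,
while a block has only 16*16 = 256 threads, hence the two loading passes inside the kernel below */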
__global__ void SharedMemoryConvolution(float * InputImageData, const float *__restrict__ kernel,
float* outputImageData, int channels, int width, int height){
__shared__ float N_ds[BLOCK_WIDTH][BLOCK_WIDTH]; //block of image in shared memory
// allocation in shared memory of image blocks
int maskRadius = maskRows/2;
for (int k = 0; k <channels; k++) {
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x; // flatten the 2D coordinates of the generic thread
int destY = dest/BLOCK_WIDTH; //row of shared memory (makes the inverse operation , in that it calculates the 2D coordinates )
int destX = dest%BLOCK_WIDTH; //col of shared memory (of the generica thread with respect to the shared memory area )
int srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius; // index to fetch data from input image
int srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius; // index to fetch data from input image
int src = (srcY *width +srcX) * channels + k; // index of input image
// When a thread is to load any input element, test if it is in the valid index range
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src]; // copy element of image in shared memory
else
N_ds[destY][destX] = 0;
dest = threadIdx.y * TILE_WIDTH+ threadIdx.x + (TILE_WIDTH * TILE_WIDTH);
destY = dest/BLOCK_WIDTH;
destX = dest%BLOCK_WIDTH;
srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius;
srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius;
src = (srcY *width +srcX) * channels + k;
if(destY < BLOCK_WIDTH){
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src];
else
N_ds[destY][destX] = 0;
}
//Barrier synchronization
__syncthreads();
//compute kernel convolution
float Pvalue = 0;
int y, x;
for (y= 0; y < maskCols; y++)
for(x = 0; x<maskRows; x++)
Pvalue += N_ds[threadIdx.y + y][threadIdx.x + x] *kernel[y * maskCols + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if(y < height && x < width)
outputImageData[(y * width + x) * channels + k] = Pvalue;
__syncthreads();
}
}
void imageConvolutionSharedMemory(const char* inputfilepath, const char* outputfilepath ){
int imgChannels;
int imgHeight;
int imgWidth;
Image* imgInput;
Image* imgOutput;
float* hostInputImageData;
float* hostOutputImageData;
float* deviceInputImageData;
float* deviceOutputImageData;
float* deviceMaskData;
float hostMaskData[maskRows * maskCols]={
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06,
0.06, 0.06, 0.06, 0.06, 0.06
};
imgInput = import_PPM(inputfilepath);
imgWidth = img_getWidth(imgInput);
imgHeight = img_getHeight(imgInput);
imgChannels = img_getChannels(imgInput);
imgOutput = Image_new(imgWidth, imgHeight, imgChannels);
hostInputImageData = img_getData(imgInput);
hostOutputImageData = img_getData(imgOutput);
cudaDeviceReset();
cudaMalloc((void **) &deviceInputImageData, imgWidth * imgHeight *
imgChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imgWidth * imgHeight *
imgChannels * sizeof(float));
cudaMalloc((void **) &deviceMaskData, maskRows * maskCols
* sizeof(float));
cudaMemcpy(deviceInputImageData, hostInputImageData,
imgWidth * imgHeight * imgChannels * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, hostMaskData,
maskRows * maskCols * sizeof(float),
cudaMemcpyHostToDevice);
dim3 dimGrid(ceil((float) imgWidth/TILE_WIDTH),
ceil((float) imgHeight/TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
cout << "CONVOLUTION SHARED MEMORY" << endl;
cout << "Image dimensions : " << imgWidth << "x" << imgHeight << " , Channels : " << imgChannels << endl;
cout << "Time: ";
high_resolution_clock::time_point start= high_resolution_clock::now();
SharedMemoryConvolution<<<dimGrid,dimBlock>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData,
imgChannels, imgWidth, imgHeight);
cudaDeviceSynchronize(); // kernel launches are asynchronous: wait for completion so the timer measures execution, not just the launch
high_resolution_clock::time_point end= high_resolution_clock::now();
chrono::duration<double> duration = end - start;
cout << duration.count()*1000 << " millisec" <<endl;
cout << "----------------------------------" << endl;
cudaMemcpy(hostOutputImageData, deviceOutputImageData, imgWidth * imgHeight *
imgChannels * sizeof(float), cudaMemcpyDeviceToHost);
write_image(outputfilepath, imgOutput);
cudaMemset(deviceInputImageData,0,imgWidth * imgHeight *
imgChannels * sizeof(float));
cudaMemset(deviceOutputImageData,0,imgWidth * imgHeight *
imgChannels * sizeof(float));
cudaMemset(deviceMaskData,0,maskRows * maskCols
* sizeof(float));
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
Image_delete(imgOutput);
Image_delete(imgInput);
}
int main(){
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photoSD.ppm","/home/aventuri/progetto/sharedmemory/resultSDSM.ppm");
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photoHD1.ppm","/home/aventuri/progetto/sharedmemory/resultHD1SM.ppm");
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photoHD2.ppm","/home/aventuri/progetto/sharedmemory/resultHD2SM.ppm");
imageConvolutionSharedMemory("/home/aventuri/progetto/sharedmemory/photo4K.ppm","/home/aventuri/progetto/sharedmemory/result4KSM.ppm");
} |
d304e279fff5a670144f44ec1eaaf7a1347956b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// --------------------------------------------------------
// R-FCN
// Written by Yi Li, 2016.
// --------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/hw_roi_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
//template <typename Dtype>
//__global__ void PSROIPoolingForward(
// const int nthreads,
// const Dtype* bottom_data,
// const Dtype spatial_scale,
// const int channels,
// const int height, const int width,
// const int pooled_height, const int pooled_width,
// const Dtype* bottom_rois,
// const int output_dim,
// const int group_size,
// Dtype* top_data,
// int* mapping_channel) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// // The output is in order (n, ctop, ph, pw)
// int pw = index % pooled_width;
// int ph = (index / pooled_width) % pooled_height;
// int ctop = (index / pooled_width / pooled_height) % output_dim;
// int n = index / pooled_width / pooled_height / output_dim;
// // [start, end) interval for spatial sampling
// bottom_rois += n * 5;
// int roi_batch_ind = bottom_rois[0];
// Dtype roi_start_w =
// static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
// Dtype roi_start_h =
// static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
// Dtype roi_end_w =
// static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
// Dtype roi_end_h =
// static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// // Force too small ROIs to be 1x1
// Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// // Compute w and h at bottom
// Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
// Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
// int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
// + roi_start_h);
// int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
// + roi_start_w);
// int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
// + roi_start_h);
// int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
// + roi_start_w);
// // Add roi offsets and clip to input boundaries
// hstart = min(max(hstart, 0), height);
// hend = min(max(hend, 0), height);
// wstart = min(max(wstart, 0), width);
// wend = min(max(wend, 0), width);
// bool is_empty = (hend <= hstart) || (wend <= wstart);
// int gw = pw;
// int gh = ph;
// int c = (ctop*group_size + gh)*group_size + gw;
// bottom_data += (roi_batch_ind * channels + c) * height * width;
// Dtype out_sum = 0;
// for (int h = hstart; h < hend; ++h) {
// for (int w = wstart; w < wend; ++w) {
// int bottom_index = h*width + w;
// out_sum += bottom_data[bottom_index];
// }
// }
// Dtype bin_area = (hend - hstart)*(wend - wstart);
// top_data[index] = is_empty? 0. : out_sum/bin_area;
// mapping_channel[index] = c;
// }
//}
template <typename Dtype>
__global__ void HWROIPoolingForward(
const int nthreads,
const Dtype* const bottom_data,
const Dtype spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const Dtype* bottom_rois,
Dtype* top_data) {
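// unlike the position-sensitive PSROIPooling kept (commented out) above, every bin of output channel ctop averages the same input channel ctop of the ROI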
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
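// each output bin (ph, pw) averages the input features over its sub-window of the ROI; bins whose window is empty output 0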
/*
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
*/
const Dtype* bottom_data_ = bottom_data + (roi_batch_ind * channels + ctop) * height * width;
//pooling
Dtype out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += bottom_data_[bottom_index];
}
}
Dtype bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? 0. : out_sum/bin_area;
//mapping_channel[index] = c;
}
}
template <typename Dtype>
void HWROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set(count, Dtype(0), top_data);
// NOLINT_NEXT_LINE(whitespace/operators)
HWROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count,
bottom_data,
spatial_scale_,
channels_,
height_,
width_,
pooled_height_,
pooled_width_,
bottom_rois,
top_data);
CUDA_POST_KERNEL_CHECK;
}
//template <typename Dtype>
//__global__ void PSROIPoolingBackwardAtomic(
// const int nthreads,
// const Dtype* top_diff,
// const int* mapping_channel,
// const int num_rois,
// const Dtype spatial_scale,
// const int channels,
// const int height, const int width,
// const int pooled_height, const int pooled_width,
// const int output_dim,
// Dtype* bottom_diff,
// const Dtype* bottom_rois) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// // The output is in order (n, ctop, ph, pw)
// int pw = index % pooled_width;
// int ph = (index / pooled_width) % pooled_height;
// int n = index / pooled_width / pooled_height / output_dim;
// // [start, end) interval for spatial sampling
// bottom_rois += n * 5;
// int roi_batch_ind = bottom_rois[0];
// Dtype roi_start_w =
// static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
// Dtype roi_start_h =
// static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
// Dtype roi_end_w =
// static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
// Dtype roi_end_h =
// static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// // Force too small ROIs to be 1x1
// Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// // Compute w and h at bottom
// Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
// Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
// int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
// + roi_start_h);
// int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
// + roi_start_w);
// int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
// + roi_start_h);
// int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
// + roi_start_w);
// // Add roi offsets and clip to input boundaries
// hstart = min(max(hstart, 0), height);
// hend = min(max(hend, 0), height);
// wstart = min(max(wstart, 0), width);
// wend = min(max(wend, 0), width);
// bool is_empty = (hend <= hstart) || (wend <= wstart);
// // Compute c at bottom
// int c = mapping_channel[index];
// Dtype* offset_bottom_diff = bottom_diff +
// (roi_batch_ind * channels + c) * height * width;
// Dtype bin_area = (hend - hstart)*(wend - wstart);
// Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
// for (int h = hstart; h < hend; ++h) {
// for (int w = wstart; w < wend; ++w) {
// int bottom_index = h*width + w;
// caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
// }
// }
// }
//}
template <typename Dtype>
__global__ void HWROIPoolingBackwardAtomic(
const int nthreads,
const Dtype* top_diff,
const Dtype spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
Dtype* const bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
//int c = mapping_channel[index];
Dtype* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
Dtype bin_area = (hend - hstart)*(wend - wstart);
Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
template <typename Dtype>
void HWROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
//const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0), bottom_diff);
const int count = top[0]->count();
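  // One thread per element of the top (pooled) gradient; atomics resolve overlapping ROIs.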
// NOLINT_NEXT_LINE(whitespace/operators)
HWROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, top_diff,
spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_,bottom_diff,
bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(HWROIPoolingLayer);
} // namespace caffe
| d304e279fff5a670144f44ec1eaaf7a1347956b3.cu | // --------------------------------------------------------
// R-FCN
// Written by Yi Li, 2016.
// --------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/hw_roi_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
//template <typename Dtype>
//__global__ void PSROIPoolingForward(
// const int nthreads,
// const Dtype* bottom_data,
// const Dtype spatial_scale,
// const int channels,
// const int height, const int width,
// const int pooled_height, const int pooled_width,
// const Dtype* bottom_rois,
// const int output_dim,
// const int group_size,
// Dtype* top_data,
// int* mapping_channel) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// // The output is in order (n, ctop, ph, pw)
// int pw = index % pooled_width;
// int ph = (index / pooled_width) % pooled_height;
// int ctop = (index / pooled_width / pooled_height) % output_dim;
// int n = index / pooled_width / pooled_height / output_dim;
// // [start, end) interval for spatial sampling
// bottom_rois += n * 5;
// int roi_batch_ind = bottom_rois[0];
// Dtype roi_start_w =
// static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
// Dtype roi_start_h =
// static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
// Dtype roi_end_w =
// static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
// Dtype roi_end_h =
// static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// // Force too small ROIs to be 1x1
// Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// // Compute w and h at bottom
// Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
// Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
// int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
// + roi_start_h);
// int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
// + roi_start_w);
// int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
// + roi_start_h);
// int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
// + roi_start_w);
// // Add roi offsets and clip to input boundaries
// hstart = min(max(hstart, 0), height);
// hend = min(max(hend, 0), height);
// wstart = min(max(wstart, 0), width);
// wend = min(max(wend, 0), width);
// bool is_empty = (hend <= hstart) || (wend <= wstart);
// int gw = pw;
// int gh = ph;
// int c = (ctop*group_size + gh)*group_size + gw;
// bottom_data += (roi_batch_ind * channels + c) * height * width;
// Dtype out_sum = 0;
// for (int h = hstart; h < hend; ++h) {
// for (int w = wstart; w < wend; ++w) {
// int bottom_index = h*width + w;
// out_sum += bottom_data[bottom_index];
// }
// }
// Dtype bin_area = (hend - hstart)*(wend - wstart);
// top_data[index] = is_empty? 0. : out_sum/bin_area;
// mapping_channel[index] = c;
// }
//}
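// Forward pass of HW ROI average pooling: one thread per output element (n, ctop, ph, pw).
// Each thread maps its bin back onto the bottom feature map and averages the covered cells.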
template <typename Dtype>
__global__ void HWROIPoolingForward(
const int nthreads,
const Dtype* const bottom_data,
const Dtype spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const Dtype* bottom_rois,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
/*
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
*/
const Dtype* bottom_data_ = bottom_data + (roi_batch_ind * channels + ctop) * height * width;
//average pooling over the bin
Dtype out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += bottom_data_[bottom_index];
}
}
Dtype bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? 0. : out_sum/bin_area;
//mapping_channel[index] = c;
}
}
template <typename Dtype>
void HWROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set(count, Dtype(0), top_data);
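  // One thread per element of the pooled output blob.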
// NOLINT_NEXT_LINE(whitespace/operators)
HWROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count,
bottom_data,
spatial_scale_,
channels_,
height_,
width_,
pooled_height_,
pooled_width_,
bottom_rois,
top_data);
CUDA_POST_KERNEL_CHECK;
}
//template <typename Dtype>
//__global__ void PSROIPoolingBackwardAtomic(
// const int nthreads,
// const Dtype* top_diff,
// const int* mapping_channel,
// const int num_rois,
// const Dtype spatial_scale,
// const int channels,
// const int height, const int width,
// const int pooled_height, const int pooled_width,
// const int output_dim,
// Dtype* bottom_diff,
// const Dtype* bottom_rois) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// // The output is in order (n, ctop, ph, pw)
// int pw = index % pooled_width;
// int ph = (index / pooled_width) % pooled_height;
// int n = index / pooled_width / pooled_height / output_dim;
// // [start, end) interval for spatial sampling
// bottom_rois += n * 5;
// int roi_batch_ind = bottom_rois[0];
// Dtype roi_start_w =
// static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
// Dtype roi_start_h =
// static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
// Dtype roi_end_w =
// static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
// Dtype roi_end_h =
// static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// // Force too small ROIs to be 1x1
// Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// // Compute w and h at bottom
// Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
// Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
// int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
// + roi_start_h);
// int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
// + roi_start_w);
// int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
// + roi_start_h);
// int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
// + roi_start_w);
// // Add roi offsets and clip to input boundaries
// hstart = min(max(hstart, 0), height);
// hend = min(max(hend, 0), height);
// wstart = min(max(wstart, 0), width);
// wend = min(max(wend, 0), width);
// bool is_empty = (hend <= hstart) || (wend <= wstart);
// // Compute c at bottom
// int c = mapping_channel[index];
// Dtype* offset_bottom_diff = bottom_diff +
// (roi_batch_ind * channels + c) * height * width;
// Dtype bin_area = (hend - hstart)*(wend - wstart);
// Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
// for (int h = hstart; h < hend; ++h) {
// for (int w = wstart; w < wend; ++w) {
// int bottom_index = h*width + w;
// caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
// }
// }
// }
//}
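// Backward pass of HW ROI average pooling: one thread per element of the top gradient
// (n, c, ph, pw). Each thread spreads top_diff[index] / bin_area uniformly over the bottom
// cells covered by its pooling bin, using atomic adds because ROIs may overlap.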
template <typename Dtype>
__global__ void HWROIPoolingBackwardAtomic(
const int nthreads,
const Dtype* top_diff,
const Dtype spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
Dtype* const bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
//int c = mapping_channel[index];
Dtype* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
Dtype bin_area = (hend - hstart)*(wend - wstart);
Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
template <typename Dtype>
void HWROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
//const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0), bottom_diff);
const int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
HWROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, top_diff,
spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_,bottom_diff,
bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(HWROIPoolingLayer);
} // namespace caffe
|
26182e6ad6336530f298fec432cb5f2d17984b56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
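// One thread per input index: writes on_value at column x[i] of row i of the depth-wide
// output y; every other entry of y is left untouched (presumably pre-filled by the caller).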
template <typename T>
__global__ void _OneHot(
const int nthreads,
const int depth,
const int on_value,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int val = x[i];
y[i * depth + val] = (T)on_value;
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void OneHot<T, CUDAContext>( \
const int count, \
const int depth, \
const int on_value, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _OneHot), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
count, depth, on_value, x, y); \
}
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| 26182e6ad6336530f298fec432cb5f2d17984b56.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
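// One thread per input index: writes on_value at column x[i] of row i of the depth-wide
// output y; every other entry of y is left untouched (presumably pre-filled by the caller).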
template <typename T>
__global__ void _OneHot(
const int nthreads,
const int depth,
const int on_value,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int val = x[i];
y[i * depth + val] = (T)on_value;
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void OneHot<T, CUDAContext>( \
const int count, \
const int depth, \
const int on_value, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
_OneHot<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
count, depth, on_value, x, y); \
}
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
5e8e8259c1e42dcac7be67a72a9c1ca0da98a105.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_11.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
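  // State layout: a pitched 2D array with NEQ rows of pitch_h bytes and one column per cell,
  // so state variable i of cell j is addressed as *((real*)((char*)sv + pitch * i) + j).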
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5169285143903,0.00130428563365721,0.778447364000152,0.778294777556242,0.000176052463458467,0.484556008731646,0.00295106634206365,0.999998331220203,1.95009865571766e-08,1.90405217604297e-05,0.999773931735328,1.00732337673749,0.999997839287066,3.97912244489960e-05,0.947578224058516,9.53631582857868,139.823425609239};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
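// Right-hand side of the ten Tusscher 2004 epicardial cell model: computes the ionic
// currents, advances the gates and concentrations over one step of size dt, and returns
// the updated state (membrane voltage first) in rDY_.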
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9822763642886,0.000336521649878696,0.000144542916642332,0.000516942526086760,0.253138096656416,0.171109018622005,0.130336142672705,3.88071468613803,0.0154855862471817,2.16547576686118,1091.40643117116,0.000575140596221629,0.180541766553447,0.0183755879605413,0.00807832472755813,1.82509834179719e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 5e8e8259c1e42dcac7be67a72a9c1ca0da98a105.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_11.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
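  // State layout: a pitched 2D array with NEQ rows of pitch_h bytes and one column per cell,
  // so state variable i of cell j is addressed as *((real*)((char*)sv + pitch * i) + j).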
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5169285143903,0.00130428563365721,0.778447364000152,0.778294777556242,0.000176052463458467,0.484556008731646,0.00295106634206365,0.999998331220203,1.95009865571766e-08,1.90405217604297e-05,0.999773931735328,1.00732337673749,0.999997839287066,3.97912244489960e-05,0.947578224058516,9.53631582857868,139.823425609239};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
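// Right-hand side of the ten Tusscher 2004 epicardial cell model: computes the ionic
// currents, advances the gates and concentrations over one step of size dt, and returns
// the updated state (membrane voltage first) in rDY_.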
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9822763642886,0.000336521649878696,0.000144542916642332,0.000516942526086760,0.253138096656416,0.171109018622005,0.130336142672705,3.88071468613803,0.0154855862471817,2.16547576686118,1091.40643117116,0.000575140596221629,0.180541766553447,0.0183755879605413,0.00807832472755813,1.82509834179719e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
d5c8dc73cc28fcfcb1800609b8ec2a61430d01db.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/search.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
struct dispatch_scalar_index {
template <typename IndexType, std::enable_if_t<is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> operator()(size_type index,
bool is_valid,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return std::make_unique<numeric_scalar<IndexType>>(index, is_valid, stream, mr);
}
template <typename IndexType,
typename... Args,
std::enable_if_t<not is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> operator()(Args&&...)
{
CUDF_FAIL("indices must be an integral type");
}
};
/**
* @brief Find index of a given key within a dictionary's keys column.
*
* The index is the position within the keys column where the given key (scalar) is found.
* The keys column is sorted and unique so only one value is expected.
* The result is an integer scalar identifying the index value.
* If the key is not found, the resulting scalar has `is_valid()=false`.
*/
struct find_index_fn {
template <typename Element,
std::enable_if_t<not std::is_same<Element, dictionary32>::value and
not std::is_same<Element, list_view>::value and
not std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const& input,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (!key.is_valid())
return type_dispatcher(input.indices().type(), dispatch_scalar_index{}, 0, false, stream, mr);
CUDF_EXPECTS(input.keys().type() == key.type(),
"search key type must match dictionary keys type");
using ScalarType = cudf::scalar_type_t<Element>;
auto find_key = static_cast<ScalarType const&>(key).value(stream);
auto keys_view = column_device_view::create(input.keys(), stream);
auto iter = thrust::equal_range(thrust::device, // segfaults: rmm::exec_policy(stream) and
// thrust::hip::par.on(stream)
keys_view->begin<Element>(),
keys_view->end<Element>(),
find_key);
return type_dispatcher(input.indices().type(),
dispatch_scalar_index{},
thrust::distance(keys_view->begin<Element>(), iter.first),
(thrust::distance(iter.first, iter.second) > 0),
stream,
mr);
}
template <typename Element,
std::enable_if_t<std::is_same<Element, dictionary32>::value or
std::is_same<Element, list_view>::value or
std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const&,
scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*) const
{
CUDF_FAIL(
"dictionary, list_view, and struct_view columns cannot be the keys column of a dictionary");
}
};
struct find_insert_index_fn {
template <typename Element,
std::enable_if_t<not std::is_same<Element, dictionary32>::value and
not std::is_same<Element, list_view>::value and
not std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const& input,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (!key.is_valid())
return type_dispatcher(input.indices().type(), dispatch_scalar_index{}, 0, false, stream, mr);
CUDF_EXPECTS(input.keys().type() == key.type(),
"search key type must match dictionary keys type");
using ScalarType = cudf::scalar_type_t<Element>;
auto find_key = static_cast<ScalarType const&>(key).value(stream);
auto keys_view = column_device_view::create(input.keys(), stream);
auto iter = thrust::lower_bound(
rmm::exec_policy(stream), keys_view->begin<Element>(), keys_view->end<Element>(), find_key);
return type_dispatcher(input.indices().type(),
dispatch_scalar_index{},
thrust::distance(keys_view->begin<Element>(), iter),
true,
stream,
mr);
}
template <typename Element,
std::enable_if_t<std::is_same<Element, dictionary32>::value or
std::is_same<Element, list_view>::value or
std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const&,
scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*) const
{
CUDF_FAIL("dictionary, list_view, and struct_view columns cannot be the keys for a dictionary");
}
};
} // namespace
std::unique_ptr<scalar> get_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (dictionary.is_empty())
return std::make_unique<numeric_scalar<uint32_t>>(0, false, stream, mr);
return type_dispatcher<dispatch_storage_type>(
dictionary.keys().type(), find_index_fn(), dictionary, key, stream, mr);
}
std::unique_ptr<scalar> get_insert_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (dictionary.is_empty())
return std::make_unique<numeric_scalar<uint32_t>>(0, false, stream, mr);
return type_dispatcher<dispatch_storage_type>(
dictionary.keys().type(), find_insert_index_fn(), dictionary, key, stream, mr);
}
} // namespace detail
// external API
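// Illustrative use (assuming `col` is a dictionary column whose keys column is INT32):
//   auto pos = cudf::dictionary::get_index(dictionary_column_view(col),
//                                          numeric_scalar<int32_t>(42));
// pos->is_valid() is false when 42 does not occur in the keys.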
std::unique_ptr<scalar> get_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::get_index(dictionary, key, rmm::cuda_stream_default, mr);
}
} // namespace dictionary
} // namespace cudf
| d5c8dc73cc28fcfcb1800609b8ec2a61430d01db.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/search.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
struct dispatch_scalar_index {
template <typename IndexType, std::enable_if_t<is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> operator()(size_type index,
bool is_valid,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return std::make_unique<numeric_scalar<IndexType>>(index, is_valid, stream, mr);
}
template <typename IndexType,
typename... Args,
std::enable_if_t<not is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> operator()(Args&&...)
{
CUDF_FAIL("indices must be an integral type");
}
};
/**
* @brief Find index of a given key within a dictionary's keys column.
*
* The index is the position within the keys column where the given key (scalar) is found.
* The keys column is sorted and unique so only one value is expected.
* The result is an integer scalar identifying the index value.
* If the key is not found, the resulting scalar has `is_valid()=false`.
*/
struct find_index_fn {
template <typename Element,
std::enable_if_t<not std::is_same<Element, dictionary32>::value and
not std::is_same<Element, list_view>::value and
not std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const& input,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (!key.is_valid())
return type_dispatcher(input.indices().type(), dispatch_scalar_index{}, 0, false, stream, mr);
CUDF_EXPECTS(input.keys().type() == key.type(),
"search key type must match dictionary keys type");
using ScalarType = cudf::scalar_type_t<Element>;
auto find_key = static_cast<ScalarType const&>(key).value(stream);
auto keys_view = column_device_view::create(input.keys(), stream);
auto iter = thrust::equal_range(thrust::device, // segfaults: rmm::exec_policy(stream) and
// thrust::cuda::par.on(stream)
keys_view->begin<Element>(),
keys_view->end<Element>(),
find_key);
return type_dispatcher(input.indices().type(),
dispatch_scalar_index{},
thrust::distance(keys_view->begin<Element>(), iter.first),
(thrust::distance(iter.first, iter.second) > 0),
stream,
mr);
}
template <typename Element,
std::enable_if_t<std::is_same<Element, dictionary32>::value or
std::is_same<Element, list_view>::value or
std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const&,
scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*) const
{
CUDF_FAIL(
"dictionary, list_view, and struct_view columns cannot be the keys column of a dictionary");
}
};
struct find_insert_index_fn {
template <typename Element,
std::enable_if_t<not std::is_same<Element, dictionary32>::value and
not std::is_same<Element, list_view>::value and
not std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const& input,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (!key.is_valid())
return type_dispatcher(input.indices().type(), dispatch_scalar_index{}, 0, false, stream, mr);
CUDF_EXPECTS(input.keys().type() == key.type(),
"search key type must match dictionary keys type");
using ScalarType = cudf::scalar_type_t<Element>;
auto find_key = static_cast<ScalarType const&>(key).value(stream);
auto keys_view = column_device_view::create(input.keys(), stream);
auto iter = thrust::lower_bound(
rmm::exec_policy(stream), keys_view->begin<Element>(), keys_view->end<Element>(), find_key);
return type_dispatcher(input.indices().type(),
dispatch_scalar_index{},
thrust::distance(keys_view->begin<Element>(), iter),
true,
stream,
mr);
}
template <typename Element,
std::enable_if_t<std::is_same<Element, dictionary32>::value or
std::is_same<Element, list_view>::value or
std::is_same<Element, struct_view>::value>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const&,
scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*) const
{
CUDF_FAIL("dictionary, list_view, and struct_view columns cannot be the keys for a dictionary");
}
};
} // namespace
std::unique_ptr<scalar> get_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (dictionary.is_empty())
return std::make_unique<numeric_scalar<uint32_t>>(0, false, stream, mr);
return type_dispatcher<dispatch_storage_type>(
dictionary.keys().type(), find_index_fn(), dictionary, key, stream, mr);
}
std::unique_ptr<scalar> get_insert_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (dictionary.is_empty())
return std::make_unique<numeric_scalar<uint32_t>>(0, false, stream, mr);
return type_dispatcher<dispatch_storage_type>(
dictionary.keys().type(), find_insert_index_fn(), dictionary, key, stream, mr);
}
} // namespace detail
// external API
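// Illustrative use (assuming `col` is a dictionary column whose keys column is INT32):
//   auto pos = cudf::dictionary::get_index(dictionary_column_view(col),
//                                          numeric_scalar<int32_t>(42));
// pos->is_valid() is false when 42 does not occur in the keys.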
std::unique_ptr<scalar> get_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::get_index(dictionary, key, rmm::cuda_stream_default, mr);
}
} // namespace dictionary
} // namespace cudf
|
40981503c81ddeb9394bd3cc731efc362b39f31f.hip | // !!! This is a file automatically generated by hipify!!!
/*
Detected 1 CUDA Capable device(s)
Device 0: "GeForce GT 320M"
CUDA Driver Version / Runtime Version 5.0 / 5.0
CUDA Capability Major/Minor version number: 1.2
Total amount of global memory: 1024 MBytes (1073741824 bytes)
( 3) Multiprocessors x ( 8) CUDA Cores/MP: 24 CUDA Cores
GPU Clock rate: 1100 MHz (1.10 GHz)
Memory Clock rate: 790 Mhz
Memory Bus Width: 128-bit
Max Texture Dimension Size (x,y,z) 1D=(8192), 2D=(65536,32768), 3D=(2048,2048,2048)
Max Layered Texture Size (dim) x layers 1D=(8192) x 512, 2D=(8192,8192) x 512
Total amount of constant memory: 65536 bytes
Total amount of shared memory per block: 16384 bytes
Total number of registers available per block: 16384
Warp size: 32
Maximum number of threads per multiprocessor: 1024
Maximum number of threads per block: 512
Maximum sizes of each dimension of a block: 512 x 512 x 64
Maximum sizes of each dimension of a grid: 65535 x 65535 x 1
Maximum memory pitch: 2147483647 bytes
Texture alignment: 256 bytes
Concurrent copy and kernel execution: Yes with 1 copy engine(s)
Run time limit on kernels: Yes
Integrated GPU sharing Host Memory: No
Support host page-locked memory mapping: Yes
Alignment requirement for Surfaces: Yes
Device has ECC support: Disabled
CUDA Device Driver Mode (TCC or WDDM): WDDM (Windows Display Driver Model)
Device supports Unified Addressing (UVA): No
Device PCI Bus ID / PCI location ID: 2 / 0
Compute Mode:
< Default (multiple host threads can use ::hipSetDevice() with device simultaneously) >
deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 5.0, CUDA Runtime Version = 5.0, NumDevs = 1, Device0 = GeForce GT 320M
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//#include "helper_functions.h"
//#include "helper_cuda.h"
#include <stdio.h>
#include "makeDat.h"
__global__ void freqencyStep1(char *d_dat,int len, int *d_freq)
{//Step 1: accumulate the counts into shared memory first, then add them into global memory.
///There are two approaches here; this is method one, which lays shared memory out row-wise. The other lays it out column-wise (during the in-block reduction, only the first 26 threads sum the counters of threads 0 to 127).
///With method two it is best to accumulate onto the diagonal so that bank conflicts are avoided when writing out.
__shared__ int sfreq[3456];//27*128////shared memory laid out row-wise, 27 ints per thread.
for(int i=threadIdx.x ;i< 3456;i += blockDim.x)
sfreq[i] = 0;////clear the counters first.
__syncthreads();
int *myfreq = &sfreq[27*threadIdx.x];
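//each thread owns the 27-int slice sfreq[27*t .. 27*t+26]: 26 letter counters plus one spare slot;
//the odd stride of 27 is presumably padding so that neighbouring threads hit different shared-memory banks.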
int gridsize = blockDim.x * gridDim.x;
for(int i=threadIdx.x + blockIdx.x*blockDim.x; i< len; i += gridsize)
//if((d_dat[i]>='a')&&(d_dat[i]<='z'))//if the data is guaranteed to contain only a--z, this if can be dropped.
myfreq[d_dat[i]-'a']++;
__syncthreads();///each thread has now tallied its counts into its own slice of shared memory.
///use a single loop to perform the halving (tree-style) reduction.
for(int roll = 64;roll>=1; roll>>=1)
{
if(threadIdx.x <roll)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+roll)+i];
}
__syncthreads();
}
#if 0
if(threadIdx.x<64)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+64)+i];
}
__syncthreads();
if(threadIdx.x<32)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+32)+i];
}
__syncthreads();
if(threadIdx.x<16)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+16)+i];
}
if(threadIdx.x< 8)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+ 8)+i];
}
if(threadIdx.x< 4)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+ 4)+i];
}
if(threadIdx.x< 2)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+ 2)+i];
}
if(threadIdx.x == 0)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x )+i];
}
#endif
__syncthreads();
if(threadIdx.x<26)///if the GPU supports atomic add we can add straight into global memory and skip a second pass; compute capability 1.1 and above supports 32-bit atomics on global memory.
atomicAdd(&d_freq[threadIdx.x],sfreq[threadIdx.x]);
}
__global__ void freqencyMethod2(char *d_dat,int len, int *d_freq)
{//Method 2: atomically add the counts into shared memory first, then add them into global memory.
__shared__ int sfreq[26];//
if(threadIdx.x < 26)
sfreq[threadIdx.x] = 0;////clear the counters first.
__syncthreads();
int gridsize = blockDim.x * gridDim.x;
int pos = 0;
for(int i=threadIdx.x + blockIdx.x*blockDim.x; i< len; i += gridsize)
{
pos = d_dat[i]-'a';
atomicAdd(&sfreq[pos],1);
}
__syncthreads();
if(threadIdx.x<26)///if the GPU supports atomic add we can add straight into global memory and skip a second pass; compute capability 1.1 and above supports 32-bit atomics on global memory.
atomicAdd(&d_freq[threadIdx.x],sfreq[threadIdx.x]);
}
void hostCalc(char *dat,int len,int *freqency)
{
int freque[32];
memset(freque,0,32*sizeof(int));
for(int i=0;i<len;i++)
{
if((dat[i]>='a')&&(dat[i]<='z'))
freque[dat[i]-'a']++;
}
memcpy(freqency,freque,26*sizeof(int));
}
int main(int argc,char **argv)
{
//makeData("char26.dat",104857600);
//return 0;
if(argc<2)
{
fprintf(stdout,"usage: a.out datfile\n");
return -1;
}
FILE *fr = NULL;
if((fr = fopen(argv[1],"r"))==NULL)
{
fprintf(stderr,"can't open file %s\n",argv[1]);
return -1;
}
fseek(fr,0,2);
int len = ftell(fr);
rewind(fr);
len = (len-(len&4095))+4096;
char *dat = new char[len];
memset(dat,0,len);
len = fread(dat,1,len,fr);
fclose(fr);
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
free(dat);
return -1;
}
char *d_dat;
int *d_freq;
int gpuFreq[32];
int cpuFreq[32];
hipEvent_t start, stop;
clock_t t0,t1,t2;
float cptime,runtime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );//record the time point
t0 = clock();
cudaStatus = hipMalloc((void **)&d_dat,len*sizeof(char));
if(cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
free(dat);
return -1;
}
cudaStatus = hipMalloc((void **)&d_freq,32*sizeof(int));
if(cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
hipFree(d_dat);
free(dat);
return -1;
}
hipMemcpy(d_dat,dat,len*sizeof(char),hipMemcpyHostToDevice);
hipMemset(d_freq,0,32*sizeof(int));
hipEventRecord( stop, 0 );////record the time point
hipEventSynchronize( stop );
hipEventElapsedTime( &cptime, start, stop );
t1 = clock();
hipLaunchKernelGGL(( freqencyStep1), dim3(256),dim3(128), 0, 0, d_dat,len,d_freq);
// freqencyMethod2<<<256,128>>>(d_dat,len,d_freq);
cudaStatus = hipDeviceSynchronize();
if(cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
hipFree(d_freq);
hipFree(d_dat);
free(dat);
return -1;
}
hipEventRecord( start, 0 );////record the time point
hipEventSynchronize( start );
hipEventElapsedTime( &runtime, stop,start );///note: stop and start are deliberately used the other way around here.
t2 = clock();
hipMemcpy(gpuFreq,d_freq,32*sizeof(int),hipMemcpyDeviceToHost);
clock_t ht0 = clock();
hipEventRecord( start, 0 );////record the time point
hipEventSynchronize( start );
hostCalc(dat, len,cpuFreq);
hipEventRecord( stop, 0 );////record the time point
hipEventSynchronize( stop );
clock_t ht1 = clock();
float hruntime =0.0f;
hipEventElapsedTime( &hruntime, start,stop );///host computation time.
hipFree(d_freq);
hipFree(d_dat);
///check
if(memcmp(gpuFreq,cpuFreq,26*sizeof(int))!=0)
fprintf(stdout,"CHECK ERROR\n");
else
fprintf(stdout,"CHECK OK\n");
free(dat);
hipEventDestroy( start );
hipEventDestroy( stop );
printf("cptime %9.4f ms runtime %9.4f ms\n",cptime,runtime);
printf("t1-t0=%d t2-t1 = %d \n",t1-t0,t2-t1);
printf("host run time = %9.4f ms %d \n",hruntime,ht1-ht0);
return 0;
}
| 40981503c81ddeb9394bd3cc731efc362b39f31f.cu | /*
Detected 1 CUDA Capable device(s)
Device 0: "GeForce GT 320M"
CUDA Driver Version / Runtime Version 5.0 / 5.0
CUDA Capability Major/Minor version number: 1.2
Total amount of global memory: 1024 MBytes (1073741824 bytes)
( 3) Multiprocessors x ( 8) CUDA Cores/MP: 24 CUDA Cores
GPU Clock rate: 1100 MHz (1.10 GHz)
Memory Clock rate: 790 Mhz
Memory Bus Width: 128-bit
Max Texture Dimension Size (x,y,z) 1D=(8192), 2D=(65536,32768), 3D=(2048,2048,2048)
Max Layered Texture Size (dim) x layers 1D=(8192) x 512, 2D=(8192,8192) x 512
Total amount of constant memory: 65536 bytes
Total amount of shared memory per block: 16384 bytes
Total number of registers available per block: 16384
Warp size: 32
Maximum number of threads per multiprocessor: 1024
Maximum number of threads per block: 512
Maximum sizes of each dimension of a block: 512 x 512 x 64
Maximum sizes of each dimension of a grid: 65535 x 65535 x 1
Maximum memory pitch: 2147483647 bytes
Texture alignment: 256 bytes
Concurrent copy and kernel execution: Yes with 1 copy engine(s)
Run time limit on kernels: Yes
Integrated GPU sharing Host Memory: No
Support host page-locked memory mapping: Yes
Alignment requirement for Surfaces: Yes
Device has ECC support: Disabled
CUDA Device Driver Mode (TCC or WDDM): WDDM (Windows Display Driver Model)
Device supports Unified Addressing (UVA): No
Device PCI Bus ID / PCI location ID: 2 / 0
Compute Mode:
< Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >
deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 5.0, CUDA Runtime Version = 5.0, NumDevs = 1, Device0 = GeForce GT 320M
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include "helper_functions.h"
//#include "helper_cuda.h"
#include <stdio.h>
#include "makeDat.h"
__global__ void freqencyStep1(char *d_dat,int len, int *d_freq)
{//Step 1: accumulate the counts into shared memory first, then add them into global memory.
///There are two approaches here; this is method one, which lays shared memory out row-wise. The other lays it out column-wise (during the in-block reduction, only the first 26 threads sum the counters of threads 0 to 127).
///With method two it is best to accumulate onto the diagonal so that bank conflicts are avoided when writing out.
__shared__ int sfreq[3456];//27*128////shared memory laid out row-wise, 27 ints per thread.
for(int i=threadIdx.x ;i< 3456;i += blockDim.x)
sfreq[i] = 0;////clear the counters first.
__syncthreads();
int *myfreq = &sfreq[27*threadIdx.x];
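//each thread owns the 27-int slice sfreq[27*t .. 27*t+26]: 26 letter counters plus one spare slot;
//the odd stride of 27 is presumably padding so that neighbouring threads hit different shared-memory banks.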
int gridsize = blockDim.x * gridDim.x;
for(int i=threadIdx.x + blockIdx.x*blockDim.x; i< len; i += gridsize)
//if((d_dat[i]>='a')&&(d_dat[i]<='z'))//if the data is guaranteed to contain only a--z, this if can be dropped.
myfreq[d_dat[i]-'a']++;
__syncthreads();///each thread has now tallied its counts into its own slice of shared memory.
///use a single loop to perform the halving (tree-style) reduction.
for(int roll = 64;roll>=1; roll>>=1)
{
if(threadIdx.x <roll)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+roll)+i];
}
__syncthreads();
}
#if 0
if(threadIdx.x<64)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+64)+i];
}
__syncthreads();
if(threadIdx.x<32)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+32)+i];
}
__syncthreads();
if(threadIdx.x<16)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+16)+i];
}
if(threadIdx.x< 8)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+ 8)+i];
}
if(threadIdx.x< 4)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+ 4)+i];
}
if(threadIdx.x< 2)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x+ 2)+i];
}
if(threadIdx.x == 0)
{
for(int i=0;i<26;i++)
myfreq[i] += sfreq[27*(threadIdx.x )+i];
}
#endif
__syncthreads();
if(threadIdx.x<26)///if the GPU supports atomic add we can add straight into global memory and skip a second pass; compute capability 1.1 and above supports 32-bit atomics on global memory.
atomicAdd(&d_freq[threadIdx.x],sfreq[threadIdx.x]);
}
__global__ void freqencyMethod2(char *d_dat,int len, int *d_freq)
{//Method 2: atomically add the counts into shared memory first, then add them into global memory.
__shared__ int sfreq[26];//
if(threadIdx.x < 26)
sfreq[threadIdx.x] = 0;////clear the counters first.
__syncthreads();
int gridsize = blockDim.x * gridDim.x;
int pos = 0;
for(int i=threadIdx.x + blockIdx.x*blockDim.x; i< len; i += gridsize)
{
pos = d_dat[i]-'a';
atomicAdd(&sfreq[pos],1);
}
__syncthreads();
if(threadIdx.x<26)///if the GPU supports atomic add we can add straight into global memory and skip a second pass; compute capability 1.1 and above supports 32-bit atomics on global memory.
atomicAdd(&d_freq[threadIdx.x],sfreq[threadIdx.x]);
}
void hostCalc(char *dat,int len,int *freqency)
{
int freque[32];
memset(freque,0,32*sizeof(int));
for(int i=0;i<len;i++)
{
if((dat[i]>='a')&&(dat[i]<='z'))
freque[dat[i]-'a']++;
}
memcpy(freqency,freque,26*sizeof(int));
}
int main(int argc,char **argv)
{
//makeData("char26.dat",104857600);
//return 0;
if(argc<2)
{
fprintf(stdout,"usage: a.out datfile\n");
return -1;
}
FILE *fr = NULL;
if((fr = fopen(argv[1],"r"))==NULL)
{
fprintf(stderr,"can't open file %s\n",argv[1]);
return -1;
}
fseek(fr,0,2);
int len = ftell(fr);
rewind(fr);
len = (len-(len&4095))+4096;
char *dat = new char[len];
memset(dat,0,len);
len = fread(dat,1,len,fr);
fclose(fr);
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
free(dat);
return -1;
}
char *d_dat;
int *d_freq;
int gpuFreq[32];
int cpuFreq[32];
cudaEvent_t start, stop;
clock_t t0,t1,t2;
float cptime,runtime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );//record the time point
t0 = clock();
cudaStatus = cudaMalloc((void **)&d_dat,len*sizeof(char));
if(cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
free(dat);
return -1;
}
cudaStatus = cudaMalloc((void **)&d_freq,32*sizeof(int));
if(cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
cudaFree(d_dat);
free(dat);
return -1;
}
cudaMemcpy(d_dat,dat,len*sizeof(char),cudaMemcpyHostToDevice);
cudaMemset(d_freq,0,32*sizeof(int));
cudaEventRecord( stop, 0 );////record the time point
cudaEventSynchronize( stop );
cudaEventElapsedTime( &cptime, start, stop );
t1 = clock();
freqencyStep1<<<256,128>>>(d_dat,len,d_freq);
// freqencyMethod2<<<256,128>>>(d_dat,len,d_freq);
cudaStatus = cudaThreadSynchronize();
if(cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
cudaFree(d_freq);
cudaFree(d_dat);
free(dat);
return -1;
}
cudaEventRecord( start, 0 );////record the time point
cudaEventSynchronize( start );
cudaEventElapsedTime( &runtime, stop,start );///note: stop and start are deliberately used the other way around here.
t2 = clock();
cudaMemcpy(gpuFreq,d_freq,32*sizeof(int),cudaMemcpyDeviceToHost);
clock_t ht0 = clock();
cudaEventRecord( start, 0 );////record the time point
cudaEventSynchronize( start );
hostCalc(dat, len,cpuFreq);
cudaEventRecord( stop, 0 );////record the time point
cudaEventSynchronize( stop );
clock_t ht1 = clock();
float hruntime =0.0f;
cudaEventElapsedTime( &hruntime, start,stop );///host computation time.
cudaFree(d_freq);
cudaFree(d_dat);
///check
if(memcmp(gpuFreq,cpuFreq,26*sizeof(int))!=0)
fprintf(stdout,"CHECK ERROR\n");
else
fprintf(stdout,"CHECK OK\n");
free(dat);
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf("cptime %9.4f ms runtime %9.4f ms\n",cptime,runtime);
printf("t1-t0=%d t2-t1 = %d \n",t1-t0,t2-t1);
printf("host run time = %9.4f ms %d \n",hruntime,ht1-ht0);
return 0;
}
|
0268391095193dea53ce9f2907acca8cdf6a3d87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <limits>
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/softmax_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
const int CUDA_NUM_THREADS = 512;
extern __shared__ char tile[];
template <typename dtype>
__global__ void sharemem_softmax_kernel(int total_size,
const dtype* in_data,
dtype* out_data,
int inner_num,
int outer_num,
int axis_size) {
dtype* data = reinterpret_cast<dtype*>(tile) + threadIdx.x;
//! compute thread index and real data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
int blocksize = blockDim.x;
int real_index = idx_outer * inner_num + idx_inner;
int loop_idx = real_index;
//! read all data to sharemem in softmax channel
#pragma unroll
for (int i = 0; i < axis_size; ++i) {
data[i * blocksize] = in_data[loop_idx];
loop_idx += inner_num;
}
//! get maximum value in softmax channel
dtype max_data = data[0];
#pragma unroll
for (int i = 1; i < axis_size; ++i) {
dtype dt = data[i * blocksize];
if (max_data < dt) {
max_data = dt;
}
}
    //! subtract the max, exponentiate, and sum
dtype sum = 0;
#pragma unroll
for (int i = 0; i < axis_size; ++i) {
dtype* dt = data + i * blocksize;
*dt = expf(*dt - max_data);
sum += *dt;
}
//! write back result
loop_idx = real_index;
#pragma unroll
for (int i = 0; i < axis_size; ++i) {
out_data[loop_idx] = data[i * blocksize] / sum;
loop_idx += inner_num;
}
}
}
//! general kernel for softmax
template <typename dtype>
__global__ void softmax_max_kernel(int total_size,
const dtype* in_data,
dtype* out_data,
dtype min_data,
int inner_num,
int outer_num,
int axis_size) {
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
int real_index = idx_outer * inner_num + idx_inner;
//! get maximum data across softmax axis
dtype max_data = min_data;
for (int i = 0; i < axis_size; ++i) {
max_data =
in_data[real_index] > max_data ? in_data[real_index] : max_data;
real_index += inner_num;
}
out_data[idx] = max_data;
}
}
template <typename dtype>
__global__ void softmax_sub_exp_sum_kernel(int total_size,
const dtype* in_data,
dtype* out_data,
const dtype* max_data,
dtype* sum_data,
int inner_num,
int outer_num,
int axis_size) {
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
dtype max_data_cur = max_data[idx];
dtype sum_data_cur = 0;
int real_index = idx_outer * inner_num + idx_inner;
    //! compute exp and sum across the softmax axis
for (int i = 0; i < axis_size; ++i) {
dtype sub_data = in_data[real_index] - max_data_cur;
sub_data = expf(sub_data);
sum_data_cur += sub_data;
out_data[real_index] = sub_data;
real_index += inner_num;
}
sum_data[idx] = sum_data_cur;
}
}
template <typename dtype>
__global__ void softmax_divid_output_kernel(int total_size,
dtype* io_data,
const dtype* sum_data,
int inner_num,
int outer_num,
int axis_size) {
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
dtype sum_data_cur = 1.f / sum_data[idx];
int real_index = idx_outer * inner_num + idx_inner;
//! compute final result
for (int i = 0; i < axis_size; ++i) {
io_data[real_index] = io_data[real_index] * sum_data_cur;
real_index += inner_num;
}
}
}
void SoftmaxCompute::PrepareForRun() {
int device_id;
hipGetDevice(&device_id);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device_id);
sharedmem_size_ = deviceProp.sharedMemPerBlock;
max_dimsize_ = sharedmem_size_ / sizeof(float) / CUDA_NUM_THREADS;
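  // For example, a device reporting 48 KB of shared memory per block yields
  // max_dimsize_ = 48 * 1024 / 4 / 512 = 24 floats of the softmax axis cached per thread.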
}
void SoftmaxCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
auto x_dims = param.x->dims();
auto x_rank = x_dims.size();
int axis = param.axis;
if (axis < 0) {
axis += x_rank;
}
int outer_num = x_dims.Slice(0, axis).production();
int inner_num = x_dims.Slice(axis + 1, x_rank).production();
int total_threads = inner_num * outer_num;
axis_size_ = x_dims[axis];
const int threads = CUDA_NUM_THREADS;
const int blocks = (total_threads + threads - 1) / threads;
auto input_data = param.x->data<float>();
auto output_data = param.output->mutable_data<float>(TARGET(kCUDA));
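  // When the softmax axis fits into shared memory (axis_size_ <= max_dimsize_), a single kernel
  // caches the whole axis per thread; otherwise we fall back to three passes over global memory
  // (per-axis max, exp + sum, divide), implemented by the three kernels above.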
if (axis_size_ <= max_dimsize_) {
int use_sharemem_size = axis_size_ * threads * sizeof(float);
hipLaunchKernelGGL(( sharemem_softmax_kernel), dim3(blocks), dim3(threads), use_sharemem_size, stream,
total_threads,
input_data,
output_data,
inner_num,
outer_num,
axis_size_);
} else {
//! re_alloc device memory
tmax_data_.Resize({1, 1, 1, outer_num * inner_num});
tsum_data_.Resize({1, 1, 1, outer_num * inner_num});
auto max_data = tmax_data_.mutable_data<float>(TARGET(kCUDA));
auto sum_data = tsum_data_.mutable_data<float>(TARGET(kCUDA));
//! firstly, get maximum data
float min_data = std::numeric_limits<float>::lowest();
hipLaunchKernelGGL(( softmax_max_kernel<float>), dim3(blocks), dim3(threads), 0, stream, total_threads,
input_data,
max_data,
min_data,
inner_num,
outer_num,
axis_size_);
//! then, compute exp and sum data
hipLaunchKernelGGL(( softmax_sub_exp_sum_kernel<float>), dim3(blocks), dim3(threads), 0, stream,
total_threads,
input_data,
output_data,
max_data,
sum_data,
inner_num,
outer_num,
axis_size_);
//! last, compute divided output
hipLaunchKernelGGL(( softmax_divid_output_kernel<float>), dim3(blocks), dim3(threads), 0, stream,
total_threads, output_data, sum_data, inner_num, outer_num, axis_size_);
}
hipError_t error = hipGetLastError();
if (error != hipSuccess) LOG(ERROR) << hipGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(softmax,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SoftmaxCompute,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("axis",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.Finalize();
REGISTER_LITE_KERNEL(search_seq_softmax,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SoftmaxCompute,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out_log", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
| 0268391095193dea53ce9f2907acca8cdf6a3d87.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <limits>
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/softmax_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
const int CUDA_NUM_THREADS = 512;
extern __shared__ char tile[];
template <typename dtype>
__global__ void sharemem_softmax_kernel(int total_size,
const dtype* in_data,
dtype* out_data,
int inner_num,
int outer_num,
int axis_size) {
dtype* data = reinterpret_cast<dtype*>(tile) + threadIdx.x;
//! compute thread index and real data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
int blocksize = blockDim.x;
int real_index = idx_outer * inner_num + idx_inner;
int loop_idx = real_index;
//! read all data to sharemem in softmax channel
#pragma unroll
for (int i = 0; i < axis_size; ++i) {
data[i * blocksize] = in_data[loop_idx];
loop_idx += inner_num;
}
//! get maximum value in softmax channel
dtype max_data = data[0];
#pragma unroll
for (int i = 1; i < axis_size; ++i) {
dtype dt = data[i * blocksize];
if (max_data < dt) {
max_data = dt;
}
}
    //! subtract the max, exponentiate, and sum
dtype sum = 0;
#pragma unroll
for (int i = 0; i < axis_size; ++i) {
dtype* dt = data + i * blocksize;
*dt = expf(*dt - max_data);
sum += *dt;
}
//! write back result
loop_idx = real_index;
#pragma unroll
for (int i = 0; i < axis_size; ++i) {
out_data[loop_idx] = data[i * blocksize] / sum;
loop_idx += inner_num;
}
}
}
//! general kernel for softmax
template <typename dtype>
__global__ void softmax_max_kernel(int total_size,
const dtype* in_data,
dtype* out_data,
dtype min_data,
int inner_num,
int outer_num,
int axis_size) {
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
int real_index = idx_outer * inner_num + idx_inner;
//! get maximum data across softmax axis
dtype max_data = min_data;
for (int i = 0; i < axis_size; ++i) {
max_data =
in_data[real_index] > max_data ? in_data[real_index] : max_data;
real_index += inner_num;
}
out_data[idx] = max_data;
}
}
template <typename dtype>
__global__ void softmax_sub_exp_sum_kernel(int total_size,
const dtype* in_data,
dtype* out_data,
const dtype* max_data,
dtype* sum_data,
int inner_num,
int outer_num,
int axis_size) {
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
dtype max_data_cur = max_data[idx];
dtype sum_data_cur = 0;
int real_index = idx_outer * inner_num + idx_inner;
    //! compute exp and sum across the softmax axis
for (int i = 0; i < axis_size; ++i) {
dtype sub_data = in_data[real_index] - max_data_cur;
sub_data = expf(sub_data);
sum_data_cur += sub_data;
out_data[real_index] = sub_data;
real_index += inner_num;
}
sum_data[idx] = sum_data_cur;
}
}
template <typename dtype>
__global__ void softmax_divid_output_kernel(int total_size,
dtype* io_data,
const dtype* sum_data,
int inner_num,
int outer_num,
int axis_size) {
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
dtype sum_data_cur = 1.f / sum_data[idx];
int real_index = idx_outer * inner_num + idx_inner;
//! compute final result
for (int i = 0; i < axis_size; ++i) {
io_data[real_index] = io_data[real_index] * sum_data_cur;
real_index += inner_num;
}
}
}
void SoftmaxCompute::PrepareForRun() {
int device_id;
cudaGetDevice(&device_id);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device_id);
sharedmem_size_ = deviceProp.sharedMemPerBlock;
max_dimsize_ = sharedmem_size_ / sizeof(float) / CUDA_NUM_THREADS;
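  // For example, a device reporting 48 KB of shared memory per block yields
  // max_dimsize_ = 48 * 1024 / 4 / 512 = 24 floats of the softmax axis cached per thread.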
}
void SoftmaxCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
auto x_dims = param.x->dims();
auto x_rank = x_dims.size();
int axis = param.axis;
if (axis < 0) {
axis += x_rank;
}
int outer_num = x_dims.Slice(0, axis).production();
int inner_num = x_dims.Slice(axis + 1, x_rank).production();
int total_threads = inner_num * outer_num;
axis_size_ = x_dims[axis];
const int threads = CUDA_NUM_THREADS;
const int blocks = (total_threads + threads - 1) / threads;
auto input_data = param.x->data<float>();
auto output_data = param.output->mutable_data<float>(TARGET(kCUDA));
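  // When the softmax axis fits into shared memory (axis_size_ <= max_dimsize_), a single kernel
  // caches the whole axis per thread; otherwise we fall back to three passes over global memory
  // (per-axis max, exp + sum, divide), implemented by the three kernels above.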
if (axis_size_ <= max_dimsize_) {
int use_sharemem_size = axis_size_ * threads * sizeof(float);
sharemem_softmax_kernel<<<blocks, threads, use_sharemem_size, stream>>>(
total_threads,
input_data,
output_data,
inner_num,
outer_num,
axis_size_);
} else {
//! re_alloc device memory
tmax_data_.Resize({1, 1, 1, outer_num * inner_num});
tsum_data_.Resize({1, 1, 1, outer_num * inner_num});
auto max_data = tmax_data_.mutable_data<float>(TARGET(kCUDA));
auto sum_data = tsum_data_.mutable_data<float>(TARGET(kCUDA));
//! firstly, get maximum data
float min_data = std::numeric_limits<float>::lowest();
softmax_max_kernel<float><<<blocks, threads, 0, stream>>>(total_threads,
input_data,
max_data,
min_data,
inner_num,
outer_num,
axis_size_);
//! then, compute exp and sum data
softmax_sub_exp_sum_kernel<float><<<blocks, threads, 0, stream>>>(
total_threads,
input_data,
output_data,
max_data,
sum_data,
inner_num,
outer_num,
axis_size_);
//! last, compute divided output
softmax_divid_output_kernel<float><<<blocks, threads, 0, stream>>>(
total_threads, output_data, sum_data, inner_num, outer_num, axis_size_);
}
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) LOG(ERROR) << cudaGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(softmax,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SoftmaxCompute,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("axis",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.Finalize();
REGISTER_LITE_KERNEL(search_seq_softmax,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SoftmaxCompute,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out_log", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
|
d7f1a4c3a224aed7b1a1a24c4b14172d5b781b4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 MathInf GmbH, Thomas Viehmann
// Licensed under the BSD-3-Clause license
// This is the GPU implementation of the Connectionist Temporal Loss.
// We mostly follow Graves.
// 1. Graves et al: http://www.cs.toronto.edu/~graves/icml_2006.pdf
// We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based.
// Graves et al call the probabilities y, we use log_probs (also calling them inputs)
// A few optimizations (similar to those here, but also some I didn't take) are described in
// 2. Minmin Sun: http://on-demand.gputechconf.com/gtc/2016/presentation/s6383-minmin-sun-speech-recognition.pdf
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/ATen.h>
#include "ATen/Dispatch.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include <type_traits>
#include <numeric>
namespace at {
namespace native {
namespace {
// this ad-hoc helper converts from targets (l in [1]) to augmented targets (l' in [1]); note that no bound-checking is done
// __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/
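// For illustration: a target sequence {c1, c2} has augmented sequence l' = {BLANK, c1, BLANK, c2, BLANK}
// of length 2*target_length+1, so even idx maps to BLANK and odd idx maps to target[idx/2],
// which is exactly the mapping computed on the fly below.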
template<typename target_t>
__device__ static inline int64_t get_target_prime(const target_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) {
if (idx % 2 == 0) {
return BLANK;
} else {
return target[offset + stride * (idx / 2)];
}
}
// this kernel is a relatively straightforward implementation of the alpha calculation in the forward backward algorithm (section 4.1).
// A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs and log_alpha).
// In total it would be more efficient to compute the beta in the same kernel (e.g. cudnn does this). While the beta are not
// needed for the loss itself (just the grad), we can return log_alpha+log_beta (so same space as currently) and the overhead
// is small and the use-case for loss without grad is relatively limited.
// We parallelize by batch and target sequence. Empirically, it is faster to loop over the input (log probs) sequence and do
// target in parallel, even if it means more frequent __syncthreads.
// In contrast to the cuDNN implementation, we allow large target lengths. For this we need that all previous `s` have been
// computed when we start a new block_s. This is why we have our own for loop here.
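// In log space the recurrence implemented below reads (a paraphrase of eqs. (6)/(7) of [1]):
//   log_alpha[t, s] = log_probs[t, l'_s] + logsumexp(log_alpha[t-1, s], log_alpha[t-1, s-1],
//                                                    log_alpha[t-1, s-2]),
// where the third argument only participates if s > 1 and l'_s != l'_{s-2} (which in particular
// excludes blanks), and logsumexp(a, b, c) = m + log(exp(a-m) + exp(b-m) + exp(c-m)) with
// m = max(a, b, c); this is what la1, la2, la3 and lamax compute below.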
template<typename scalar_t, typename target_t>
__global__ void ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
scalar_t* __restrict__ neg_log_likelihood_data,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
// bookkeeping
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (b >= batch_size)
return;
// first row (t=0), the three equations for alpha_1 above eq (6)
for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
int64_t s = threadIdx.x + block_s;
scalar_t la;
switch (s) {
case 0:
la = log_probs_data[lp_batch_offset + lp_char_stride * BLANK];
break;
case 1:
if (target_length > 0) {
la = log_probs_data[lp_batch_offset + lp_char_stride * get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)];
}
else {
la = neginf;
}
break;
default:
la = neginf;
}
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + /* la_input_stride * 0 */ + la_target_stride * s] = la;
}
for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
int64_t s = threadIdx.x + block_s;
// These two only depend on s, so we can cache them.
int64_t current_char; // l_s in eq (6)
bool have_three; // flag which of the two cases in eq (6) we have
if (s < 2*target_length+1) {
current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s > 1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) !=
current_char));
} else {
current_char = BLANK;
have_three = false;
}
for (int64_t t=1; t < max_input_length; t++) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch
if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) {
// only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands,
// lamax is the maximum for the logsumexp trick.
scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * s];
scalar_t lamax = la1;
scalar_t la2, la3;
if (s > 0) {
la2 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-1)];
if (la2 > lamax)
lamax = la2;
} else {
la2 = neginf;
}
if (have_three) {
la3 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-2)];
if (la3 > lamax)
lamax = la3;
} else {
la3 = neginf;
}
if (lamax == neginf) // when all are neginf. (then the whole thing is neginf, but we can pretend)
lamax = 0;
log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = ::log(::exp(la1-lamax)+::exp(la2-lamax)+::exp(la3-lamax))+lamax
+ log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_char];
} else {
// otherwise we just set to neginf
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = neginf;
}
}
}
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch
// compute the loss (eq (8))
if (threadIdx.x == 0) {
scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2)];
scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2-1)];
scalar_t m = ((l1 > l2) ? l1 : l2);
m = ((m == neginf) ? 0 : m);
scalar_t log_likelihood = ::log(::exp(l1-m)+::exp(l2-m))+m;
neg_log_likelihood_data[b] = -log_likelihood;
}
}
// The forward computation. Lots of admin and a call to the alpha kernel.
// Note: we do not check that the labels are in the valid range. As we use
// them for indexing in the kernels, you'll see memory errors when you
// pass corrupt labels.
// We support both a 2-dimensional tensor as targets (one set of targets in each row) and
// a 1-dimensional tensor where all targets are concatenated (and we use target_lengths
// to figure out where they begin).
// We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the
// backward. The dispatch function will only return the loss.
template<typename scalar_t, ScalarType target_scalar_type>
std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets_, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
// log_probs: input_len x batch_size x num_labels
// targets [int64]: batch_size x target_length OR sum(target_lengths)
CheckedFrom c = "ctc_loss_gpu";
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to log_probs cuda if it isn't there already
auto log_probs_arg = TensorArg(log_probs, "log_probs", 1);
auto targets_arg = TensorArg(targets, "targets", 2);
checkAllSameGPU(c, {log_probs_arg, targets_arg});
checkScalarType(c, targets_arg, target_scalar_type);
checkDim(c, log_probs_arg, 3);
checkDimRange(c, targets_arg, 1, 3);
int64_t batch_size = log_probs.size(1);
int64_t num_labels = log_probs.size(2);
AT_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range");
AT_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size");
AT_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size");
int64_t lp_input_stride = log_probs.stride(0);
int64_t lp_char_stride = log_probs.stride(2);
int64_t tg_target_stride;
int64_t max_target_length;
auto tg_batch_offsets = at::empty({batch_size}, at::device(at::kCPU).dtype(at::kLong));
auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>();
if (targets.dim() == 1) { // concatenated targets
int64_t pos = 0;
max_target_length = 0;
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = pos;
pos += target_lengths[i];
if (max_target_length < target_lengths[i])
max_target_length = target_lengths[i];
}
tg_target_stride = targets.stride(0);
checkSize(c, targets_arg, 0, pos);
}
else { // batch x max_target_length
// dim is 2
int64_t tg_batch_stride = targets.stride(0);
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = i * tg_batch_stride;
}
tg_target_stride = targets.stride(1);
max_target_length = targets.size(1);
checkSize(c, targets_arg, 0, batch_size);
AT_CHECK(targets.size(1) >= max_target_length,
"Expected tensor to have size at least ", max_target_length, " at dimension 1, but got size ", targets.size(1), " for ", targets_arg,
" (while checking arguments for ", c, ")");
}
int64_t max_input_length = log_probs.size(0);
for (int64_t b = 0; b < batch_size; b++) {
AT_CHECK(input_lengths[b] <= max_input_length,
"Expected tensor to have size at least ", max_input_length, " at dimension 1, but got size ", targets.size(0), " for ", targets_arg,
" (while checking arguments for ", c, ")");
}
auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong));
auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong));
tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong));
Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options());
Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options());
  // Very likely, we could be more clever here, e.g. learning (or generalizing and reusing) from SoftMax.cu...
constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
int threads_target = max_threads;
while (threads_target / 2 >= 2*max_target_length+1) {
threads_target /= 2;
}
int threads_batch = ::min(max_threads / threads_target, (int) batch_size);
dim3 block(threads_target, threads_batch);
dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( ctc_loss_log_alpha_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
log_alpha.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, BLANK);
THCudaCheck(hipGetLastError()); // catch launch errors
return std::make_tuple(neg_log_likelihood, log_alpha);
}
// The second (backward) half of the forward backward algorithm, (10) and (11). This is parallel to the
// alpha kernel above. (As mentioned above, it might make sense to do the calculation in the alpha kernel.)
template<typename scalar_t, typename target_t>
__global__ void
__launch_bounds__((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (b >= batch_size)
return;
// "first" row, the beta initiaization before eq (10) (t=target_length - differes per batch)
for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
int64_t s = threadIdx.x + block_s;
scalar_t lb;
if (s == 2*target_length) {
lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * BLANK];
} else if ((target_length > 0) && (s == 2*target_length-1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * current_target_prime];
} else {
lb = neginf;
}
if (s < 2*max_target_length+1) {
log_beta_data[lb_batch_offset + (input_length-1) * lb_input_stride + lb_target_stride * s] = lb;
}
}
// go backward in s
for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
int64_t s = threadIdx.x + block_s;
int64_t current_target_prime;
bool have_three;
if (s < 2*target_length+1) {
current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s < 2*target_length-1) &&
(get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) !=
current_target_prime));
} else {
current_target_prime = BLANK;
have_three = false;
}
// now go backward in t. Note that we need to skip the last timestep that we did above.
for (int64_t t=max_input_length-2; t>=0; t--) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item
if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) {
scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * s];
scalar_t lbmax = lb1;
scalar_t lb2, lb3;
if (s < 2*target_length) {
lb2 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+1)];
if (lb2 > lbmax)
lbmax = lb2;
} else {
lb2 = neginf;
}
if (have_three) {
lb3 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+2)];
if (lb3 > lbmax)
lbmax = lb3;
} else {
lb3 = neginf;
}
if (lbmax == neginf)
lbmax = 0;
scalar_t lb = ::log(::exp(lb1-lbmax)+::exp(lb2-lbmax)+::exp(lb3-lbmax))+lbmax
+ log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_target_prime];
log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = lb;
} else if ((s < 2*max_target_length+1) && ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length))) {
log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = neginf;
}
}
}
}
// This implements the subtrahend of equation (16) for all *nonblank* characters.
// It assumes you have probs in gradient_data when called
// and it modifies gradient_data to be, the gradient.
// In order to facilitate this inplace update, we don't actually do this in logspace.
// (The other variant implemented uses log_space and the differences seem to be
// not so problematic at least with unit normal distributed test activations.)
// Internally this uses atomicAdd because different threads may write to the same
// gradient position.
// This is parallelised over b and s again.
// Note that for us, the Z of eqn (16) is actually constant for all t and it is the
// likelihood - this is why we use the negative log likelihood below.
// We also multiply by the input gradient to keep with standard autograd style.
// I took this trick from [2]; for moderate alphabet sizes a log-space
// calculation (with an atomic log add) is similar in performance, but for large
// alphabets the inplace nature is a considerable advantage.
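// Concretely, for each target position s and each valid time step t the loop below subtracts
//   exp(log_alpha[t, 2*s+1] + log_beta[t, 2*s+1] + nll - lp) * gr,   lp = log_probs[t, target_s],
// i.e. one summand alpha_t(s') * beta_t(s') / (p(l|x) * y_t^c) of the second term of eq. (16),
// scaled by the incoming gradient gr; atomicAdd makes repeated characters accumulate correctly.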
template<typename scalar_t, typename target_t>
__global__ void ctc_loss_backward_collect_nonblank_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t BLANK) {
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  int64_t s = threadIdx.x + blockIdx.x * blockDim.x; // note, this directly indexes into targets, no targets prime!
if (b >= batch_size)
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (s >= target_length)
return;
int64_t target = targets_data[tg_batch_offset + s * tg_target_stride];
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
for (int64_t t = 0; t < input_length; t++) {
scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * target];
atomicAdd(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target],
-::exp(log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * (s*2+1)]
+ log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * (s*2+1)]
+ nll - lp) * gr);
}
}
// This is the naive implementation of equation (16). It is parallelised in batch and input timestep.
// It appears to be faster than the above method for small batch sizes.
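// Written out, for every class c and time step t < input_length the kernel computes
//   grad[t, b, c] = (exp(lp) - exp(lcab + nll - lp)) * gr,
// where lp = log_probs[t, b, c], lcab is the logsumexp of log_alpha[t, s] + log_beta[t, s]
// over all s with l'_s == c, and nll = -log p(l|x); this restates eq. (16) of [1] scaled by
// the incoming gradient, with positions at or beyond input_length set to zero.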
template<typename scalar_t, typename target_t>
__global__ void ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t t = threadIdx.x + blockIdx.x * blockDim.x;
if ((t >= max_input_length) || (b >= batch_size))
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
// collected[b, t, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s]
for (int s = 0; s < 2*max_target_length+1; s++) {
if ((target_length > 0) && (s < 2*target_length+1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
scalar_t log_alpha_beta = (log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s]
+ log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s]);
scalar_t& lcab = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * current_target_prime];
if (lcab == neginf) {
lcab = log_alpha_beta;
} else {
scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta);
lcab = ::log(::exp(lcab-max)+::exp(log_alpha_beta-max))+max;
}
}
}
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
for (int64_t c = 0; c < num_labels; c++) {
scalar_t& res = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * c];
if (t < input_length) {
scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * c];
res = (::exp(lp)-::exp(res + nll - lp)) * gr;
}
else {
res = 0.;
}
}
}
// The backward. It essentially computes eq 16 by using the above kernels.
// We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward.
template<typename scalar_t, ScalarType target_scalar_type>
Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets_, IntList input_lengths, IntList target_lengths,
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to cuda if it isn't there already
int64_t batch_size = log_probs.size(1);
int64_t num_labels = log_probs.size(2);
int64_t lp_input_stride = log_probs.stride(0);
int64_t lp_char_stride = log_probs.stride(2);
int64_t tg_target_stride;
int64_t max_target_length;
auto tg_batch_offsets = at::empty({batch_size}, TensorOptions(at::CPU(kLong)));
auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>();
if (targets.dim() == 1) { // concatenated targets
int64_t pos = 0;
max_target_length = 0;
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = pos;
pos += target_lengths[i];
if (max_target_length < target_lengths[i])
max_target_length = target_lengths[i];
}
tg_target_stride = targets.stride(0);
}
else { // batch x max_target_length
// dim is 2
int64_t tg_batch_stride = targets.stride(0);
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = i * tg_batch_stride;
}
tg_target_stride = targets.stride(1);
max_target_length = targets.size(1);
}
auto target_lengths_t = at::tensor(target_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong));
auto input_lengths_t = at::tensor(input_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong));
tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong));
Tensor log_beta = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options());
Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta))
// As above, there may be better configurations to use.
constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
int threads_target = max_threads;
while (threads_target / 2 >= 2*max_target_length+1) {
threads_target /= 2;
}
int threads_batch = ::min(max_threads / threads_target, (int) batch_size);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
dim3 block(threads_target, threads_batch);
dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
hipLaunchKernelGGL(( ctc_loss_backward_log_beta_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, BLANK);
THCudaCheck(hipGetLastError()); // catch launch errors
}
  // Very crude heuristic for what is a small problem, based on linearly regressing problem dimensions on
// the (capped) difference of timings.
// Note that for OK problems target length <= input length, so we
// only consider input length.
bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450;
if (is_large) { // large alphabet, large batch
// this computes the probs, minuend in (16)
exp_out(grad, log_probs);
// now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that
// blanks are in every other position.
// maybe we should kernelize this, too.
auto grad_blank = grad.narrow(2, BLANK, 1);
grad_blank -= (at::logsumexp(log_alpha.as_strided({batch_size, log_alpha.size(1), max_target_length+1},
{log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2)*2})
+ log_beta.as_strided({batch_size, log_beta.size(1), max_target_length+1},
{log_beta.stride(0), log_beta.stride(1), log_beta.stride(2)*2}),
2, true)
.permute({1, 0, 2})
.add_(neg_log_likelihood.view({1, batch_size, 1}))
.sub_(log_probs.narrow(2, BLANK, 1))
.exp_()
);
// scale by output gradient (blanks and first summand of non-blanks)
grad *= grad_out.view({1, batch_size, 1});
// For the non-blank characters, we use a kernel to compute the subtrahend.
// Again we might configure block and grid in a better way.
int threads_target = max_threads;
while (threads_target / 2 >= max_target_length) {
threads_target /= 2;
}
int threads_batch = ::min(max_threads / threads_target, (int) batch_size);
dim3 block(threads_target, threads_batch);
dim3 grid((max_target_length + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
hipLaunchKernelGGL(( ctc_loss_backward_collect_nonblank_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, num_labels, BLANK);
THCudaCheck(hipGetLastError()); // catch launch errors
} else { // small problem, use naive algorithm
// Still no block/grid configuration guru...
int threads_input = max_threads;
while (threads_input / 2 >= log_probs.size(0)) {
threads_input /= 2;
}
threads_batch = ::min(max_threads / threads_input, (int) batch_size);
dim3 block(threads_input, threads_batch);
dim3 grid((log_probs.size(0) + threads_input-1)/threads_input, (batch_size+threads_batch-1)/threads_batch);
hipLaunchKernelGGL(( ctc_loss_backward_collect_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, num_labels, BLANK);
THCudaCheck(hipGetLastError()); // catch launch errors
}
return grad;
}
} // namespace
std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss", [&] {
if (targets.type().scalarType() == kLong) {
return ctc_loss_gpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK);
} else {
return ctc_loss_gpu_template<scalar_t, kInt>(log_probs, targets, input_lengths, target_lengths, BLANK);
}
});
}
Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths,
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss_backward", [&] {
if (targets.type().scalarType() == kLong) {
return ctc_loss_backward_gpu_template<scalar_t, kLong>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK);
} else {
return ctc_loss_backward_gpu_template<scalar_t, kInt>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK);
}
});
}
} } // at::native
| d7f1a4c3a224aed7b1a1a24c4b14172d5b781b4a.cu | // Copyright (c) 2018 MathInf GmbH, Thomas Viehmann
// Licensed under the BSD-3-Clause license
// This is the GPU implementation of the Connectionist Temporal Loss.
// We mostly follow Graves.
// 1. Graves et al: http://www.cs.toronto.edu/~graves/icml_2006.pdf
// We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based.
// Graves et al call the probabilities y, we use log_probs (also calling them inputs)
// A few optimizations (similar to those here, but also some I didn't take) are described in
// 2. Minmin Sun: http://on-demand.gputechconf.com/gtc/2016/presentation/s6383-minmin-sun-speech-recognition.pdf
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/ATen.h>
#include "ATen/Dispatch.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include <type_traits>
#include <numeric>
namespace at {
namespace native {
namespace {
// this ad-hoc helper converts from targets (l in [1]) to augmented targets (l' in [1]); note that no bound-checking is done
// __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/
template<typename target_t>
__device__ static inline int64_t get_target_prime(const target_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) {
if (idx % 2 == 0) {
return BLANK;
} else {
return target[offset + stride * (idx / 2)];
}
}
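// For intuition, get_target_prime maps targets {a, b} to the augmented sequence
// l' = {BLANK, a, BLANK, b, BLANK}: even idx returns BLANK and odd idx returns target[idx / 2],
// so idx == 3 yields b.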
// this kernel is a relatively straightforward implementation of the alpha calculation in the forward backward algorithm (section 4.1).
// A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs and log_alpha).
// In total it would be more efficient to compute the beta in the same kernel (e.g. cudnn does this). While the betas are not
// needed for the loss itself (just the grad), we can return log_alpha+log_beta (so same space as currently) and the overhead
// is small and the use-case for loss without grad is relatively limited.
// We parallelize by batch and target sequence. Empirically, it is faster to loop over the input (log probs) sequence and do
// target in parallel, even if it means more frequent __syncthreads.
// In contrast to the cuDNN implementation, we allow large target lengths. For this we need that all previous `s` have been
// computed when we start a new block_s. This is why we have our own for loop here.
template<typename scalar_t, typename target_t>
__global__ void ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
scalar_t* __restrict__ neg_log_likelihood_data,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
// bookkeeping
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (b >= batch_size)
return;
// first row (t=0), the three equations for alpha_1 above eq (6)
for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
int64_t s = threadIdx.x + block_s;
scalar_t la;
switch (s) {
case 0:
la = log_probs_data[lp_batch_offset + lp_char_stride * BLANK];
break;
case 1:
if (target_length > 0) {
la = log_probs_data[lp_batch_offset + lp_char_stride * get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)];
}
else {
la = neginf;
}
break;
default:
la = neginf;
}
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + /* la_input_stride * 0 */ + la_target_stride * s] = la;
}
for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
int64_t s = threadIdx.x + block_s;
// These two only depend on s, so we can cache them.
int64_t current_char; // l_s in eq (6)
bool have_three; // flag which of the two cases in eq (6) we have
if (s < 2*target_length+1) {
current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s > 1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) !=
current_char));
} else {
current_char = BLANK;
have_three = false;
}
for (int64_t t=1; t < max_input_length; t++) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch
if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) {
// only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands,
// lamax is the maximum for the logsumexp trick.
scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * s];
scalar_t lamax = la1;
scalar_t la2, la3;
if (s > 0) {
la2 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-1)];
if (la2 > lamax)
lamax = la2;
} else {
la2 = neginf;
}
if (have_three) {
la3 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-2)];
if (la3 > lamax)
lamax = la3;
} else {
la3 = neginf;
}
if (lamax == neginf) // when all are neginf. (then the whole thing is neginf, but we can pretend)
lamax = 0;
log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = std::log(std::exp(la1-lamax)+std::exp(la2-lamax)+std::exp(la3-lamax))+lamax
+ log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_char];
} else {
// otherwise we just set to neginf
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = neginf;
}
}
}
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch
// compute the loss (eq (8))
if (threadIdx.x == 0) {
scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2)];
scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2-1)];
scalar_t m = ((l1 > l2) ? l1 : l2);
m = ((m == neginf) ? 0 : m);
scalar_t log_likelihood = std::log(std::exp(l1-m)+std::exp(l2-m))+m;
neg_log_likelihood_data[b] = -log_likelihood;
}
}
// The forward computation. Lot's of admin and a call to the alpha kernel.
// Note: we do not check that the labels are in the valid range. As we use
// them for indexing in the kernels, you'll see memory errors when you
// pass corrupt labels.
// We support both a 2-dimensional tensor as targets (one set of targets in each row) and
// a 1-dimensional tensor where all targets are concatenated (and we use target_lengths
// to figure out where they begin).
// We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the
// backward. The dispatch function will only return the loss.
template<typename scalar_t, ScalarType target_scalar_type>
std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets_, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
// log_probs: input_len x batch_size x num_labels
// targets [int64]: batch_size x target_length OR sum(target_lengths)
CheckedFrom c = "ctc_loss_gpu";
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to log_probs cuda if it isn't there already
auto log_probs_arg = TensorArg(log_probs, "log_probs", 1);
auto targets_arg = TensorArg(targets, "targets", 2);
checkAllSameGPU(c, {log_probs_arg, targets_arg});
checkScalarType(c, targets_arg, target_scalar_type);
checkDim(c, log_probs_arg, 3);
checkDimRange(c, targets_arg, 1, 3);
int64_t batch_size = log_probs.size(1);
int64_t num_labels = log_probs.size(2);
AT_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range");
AT_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size");
AT_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size");
int64_t lp_input_stride = log_probs.stride(0);
int64_t lp_char_stride = log_probs.stride(2);
int64_t tg_target_stride;
int64_t max_target_length;
auto tg_batch_offsets = at::empty({batch_size}, at::device(at::kCPU).dtype(at::kLong));
auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>();
if (targets.dim() == 1) { // concatenated targets
int64_t pos = 0;
max_target_length = 0;
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = pos;
pos += target_lengths[i];
if (max_target_length < target_lengths[i])
max_target_length = target_lengths[i];
}
tg_target_stride = targets.stride(0);
checkSize(c, targets_arg, 0, pos);
}
else { // batch x max_target_length
// dim is 2
int64_t tg_batch_stride = targets.stride(0);
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = i * tg_batch_stride;
}
tg_target_stride = targets.stride(1);
max_target_length = targets.size(1);
checkSize(c, targets_arg, 0, batch_size);
AT_CHECK(targets.size(1) >= max_target_length,
"Expected tensor to have size at least ", max_target_length, " at dimension 1, but got size ", targets.size(1), " for ", targets_arg,
" (while checking arguments for ", c, ")");
}
int64_t max_input_length = log_probs.size(0);
for (int64_t b = 0; b < batch_size; b++) {
AT_CHECK(input_lengths[b] <= max_input_length,
"Expected tensor to have size at least ", max_input_length, " at dimension 1, but got size ", targets.size(0), " for ", targets_arg,
" (while checking arguments for ", c, ")");
}
auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong));
auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong));
tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong));
Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options());
Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options());
  // Very likely, we could be more clever here, e.g. learning (or generalizing and reusing) from SoftMax.cu...
constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
int threads_target = max_threads;
while (threads_target / 2 >= 2*max_target_length+1) {
threads_target /= 2;
}
int threads_batch = std::min(max_threads / threads_target, (int) batch_size);
dim3 block(threads_target, threads_batch);
dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
ctc_loss_log_alpha_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>(
log_alpha.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, BLANK);
THCudaCheck(cudaGetLastError()); // catch launch errors
return std::make_tuple(neg_log_likelihood, log_alpha);
}
// The second (backward) half of the forward backward algorithm, (10) and (11). This is parallel to the
// alpha kernel above. (As mentioned above, it might make sense do the calculation in the alpha kernel.)
template<typename scalar_t, typename target_t>
__global__ void
__launch_bounds__((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (b >= batch_size)
return;
// "first" row, the beta initiaization before eq (10) (t=target_length - differes per batch)
for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
int64_t s = threadIdx.x + block_s;
scalar_t lb;
if (s == 2*target_length) {
lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * BLANK];
} else if ((target_length > 0) && (s == 2*target_length-1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * current_target_prime];
} else {
lb = neginf;
}
if (s < 2*max_target_length+1) {
log_beta_data[lb_batch_offset + (input_length-1) * lb_input_stride + lb_target_stride * s] = lb;
}
}
// go backward in s
for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
int64_t s = threadIdx.x + block_s;
int64_t current_target_prime;
bool have_three;
if (s < 2*target_length+1) {
current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s < 2*target_length-1) &&
(get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) !=
current_target_prime));
} else {
current_target_prime = BLANK;
have_three = false;
}
// now go backward in t. Note that we need to skip the last timestep that we did above.
for (int64_t t=max_input_length-2; t>=0; t--) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item
if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) {
scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * s];
scalar_t lbmax = lb1;
scalar_t lb2, lb3;
if (s < 2*target_length) {
lb2 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+1)];
if (lb2 > lbmax)
lbmax = lb2;
} else {
lb2 = neginf;
}
if (have_three) {
lb3 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+2)];
if (lb3 > lbmax)
lbmax = lb3;
} else {
lb3 = neginf;
}
if (lbmax == neginf)
lbmax = 0;
scalar_t lb = std::log(std::exp(lb1-lbmax)+std::exp(lb2-lbmax)+std::exp(lb3-lbmax))+lbmax
+ log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_target_prime];
log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = lb;
} else if ((s < 2*max_target_length+1) && ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length))) {
log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = neginf;
}
}
}
}
// This implements the subtrahend of equation (16) for all *nonblank* characters.
// It assumes you have probs in gradient_data when called
// and it modifies gradient_data to be the gradient.
// In order to facilitate this inplace update, we don't actually do this in logspace.
// (The other variant implemented uses log_space and the differences seem to be
// not so problematic at least with unit normal distributed test activations.)
// Internally this uses atomicAdd because different threads may write to the same
// gradient position.
// This is parallelised over b and s again.
// Note that for us, the Z of eqn (16) is actually constant for all t and it is the
// likelihood - this is why we use the negative log likelihood below.
// We also multiply by the input gradient to keep with standard autograd style.
// I took this trick from [2], for moderate alphabet sizes a log-space
// calculation (with an atomic log add) is similarly in performance, but for large
// alphabets the inplace nature is a considerable advantage.
template<typename scalar_t, typename target_t>
__global__ void ctc_loss_backward_collect_nonblank_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t BLANK) {
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  int64_t s = threadIdx.x + blockIdx.x * blockDim.x; // note, this directly indexes into targets, no targets prime!
if (b >= batch_size)
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (s >= target_length)
return;
int64_t target = targets_data[tg_batch_offset + s * tg_target_stride];
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
for (int64_t t = 0; t < input_length; t++) {
scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * target];
atomicAdd(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target],
-std::exp(log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * (s*2+1)]
+ log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * (s*2+1)]
+ nll - lp) * gr);
}
}
// This is the naive implementation of equation (16). It is parallelised in batch and input timestep.
// It appears to be faster than the above method for small batch sizes.
template<typename scalar_t, typename target_t>
__global__ void ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t t = threadIdx.x + blockIdx.x * blockDim.x;
if ((t >= max_input_length) || (b >= batch_size))
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
// collected[b, t, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s]
for (int s = 0; s < 2*max_target_length+1; s++) {
if ((target_length > 0) && (s < 2*target_length+1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
scalar_t log_alpha_beta = (log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s]
+ log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s]);
scalar_t& lcab = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * current_target_prime];
if (lcab == neginf) {
lcab = log_alpha_beta;
} else {
scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta);
lcab = std::log(std::exp(lcab-max)+std::exp(log_alpha_beta-max))+max;
}
}
}
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
for (int64_t c = 0; c < num_labels; c++) {
scalar_t& res = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * c];
if (t < input_length) {
scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * c];
res = (std::exp(lp)-std::exp(res + nll - lp)) * gr;
}
else {
res = 0.;
}
}
}
// The backward. It essentially computes eq 16 by using the above kernels.
// We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward.
template<typename scalar_t, ScalarType target_scalar_type>
Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets_, IntList input_lengths, IntList target_lengths,
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to cuda if it isn't there already
int64_t batch_size = log_probs.size(1);
int64_t num_labels = log_probs.size(2);
int64_t lp_input_stride = log_probs.stride(0);
int64_t lp_char_stride = log_probs.stride(2);
int64_t tg_target_stride;
int64_t max_target_length;
auto tg_batch_offsets = at::empty({batch_size}, TensorOptions(at::CPU(kLong)));
auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>();
if (targets.dim() == 1) { // concatenated targets
int64_t pos = 0;
max_target_length = 0;
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = pos;
pos += target_lengths[i];
if (max_target_length < target_lengths[i])
max_target_length = target_lengths[i];
}
tg_target_stride = targets.stride(0);
}
else { // batch x max_target_length
// dim is 2
int64_t tg_batch_stride = targets.stride(0);
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = i * tg_batch_stride;
}
tg_target_stride = targets.stride(1);
max_target_length = targets.size(1);
}
auto target_lengths_t = at::tensor(target_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong));
auto input_lengths_t = at::tensor(input_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong));
tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong));
Tensor log_beta = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options());
Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta))
// As above, there may be better configurations to use.
constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
int threads_target = max_threads;
while (threads_target / 2 >= 2*max_target_length+1) {
threads_target /= 2;
}
int threads_batch = std::min(max_threads / threads_target, (int) batch_size);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
{
dim3 block(threads_target, threads_batch);
dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
ctc_loss_backward_log_beta_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>
(log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, BLANK);
THCudaCheck(cudaGetLastError()); // catch launch errors
}
  // Very crude heuristic for what is a small problem, based on linearly regressing problem dimensions on
// the (capped) difference of timings.
// Note that for OK problems target length <= input length, so we
// only consider input length.
bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450;
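  // For example, 100 input timesteps, batch size 32 and 50 labels give
  // 2*100 + (24*32)/10 + (2*50)/10 = 200 + 76 + 10 = 286 <= 450, so the naive kernel below is used.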
if (is_large) { // large alphabet, large batch
// this computes the probs, minuend in (16)
exp_out(grad, log_probs);
// now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that
// blanks are in every other position.
// maybe we should kernelize this, too.
auto grad_blank = grad.narrow(2, BLANK, 1);
grad_blank -= (at::logsumexp(log_alpha.as_strided({batch_size, log_alpha.size(1), max_target_length+1},
{log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2)*2})
+ log_beta.as_strided({batch_size, log_beta.size(1), max_target_length+1},
{log_beta.stride(0), log_beta.stride(1), log_beta.stride(2)*2}),
2, true)
.permute({1, 0, 2})
.add_(neg_log_likelihood.view({1, batch_size, 1}))
.sub_(log_probs.narrow(2, BLANK, 1))
.exp_()
);
// scale by output gradient (blanks and first summand of non-blanks)
grad *= grad_out.view({1, batch_size, 1});
// For the non-blank characters, we use a kernel to compute the subtrahend.
// Again we might configure block and grid in a better way.
int threads_target = max_threads;
while (threads_target / 2 >= max_target_length) {
threads_target /= 2;
}
int threads_batch = std::min(max_threads / threads_target, (int) batch_size);
dim3 block(threads_target, threads_batch);
dim3 grid((max_target_length + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
ctc_loss_backward_collect_nonblank_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>
(grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, num_labels, BLANK);
THCudaCheck(cudaGetLastError()); // catch launch errors
} else { // small problem, use naive algorithm
// Still no block/grid configuration guru...
int threads_input = max_threads;
while (threads_input / 2 >= log_probs.size(0)) {
threads_input /= 2;
}
threads_batch = std::min(max_threads / threads_input, (int) batch_size);
dim3 block(threads_input, threads_batch);
dim3 grid((log_probs.size(0) + threads_input-1)/threads_input, (batch_size+threads_batch-1)/threads_batch);
ctc_loss_backward_collect_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>
(grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0),
targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data<int64_t>(), tg_target_stride,
batch_size, num_labels, BLANK);
THCudaCheck(cudaGetLastError()); // catch launch errors
}
return grad;
}
} // namespace
std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss", [&] {
if (targets.type().scalarType() == kLong) {
return ctc_loss_gpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK);
} else {
return ctc_loss_gpu_template<scalar_t, kInt>(log_probs, targets, input_lengths, target_lengths, BLANK);
}
});
}
Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths,
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss_backward", [&] {
if (targets.type().scalarType() == kLong) {
return ctc_loss_backward_gpu_template<scalar_t, kLong>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK);
} else {
return ctc_loss_backward_gpu_template<scalar_t, kInt>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK);
}
});
}
} } // at::native
|
4a884e54da29b3011784c8f9fc9246385648ade8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cunn_OneVsAllNLLCriterion_updateGradInput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
hipMalloc(&gradInput, XSIZE*YSIZE);
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
int nframe = 1;
int dim = 2;
int sizeaverage = XSIZE*YSIZE;
float *positiveWeight = NULL;
hipMalloc(&positiveWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
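// The two loops above round iXSIZE and iYSIZE up to multiples of BLOCKX and BLOCKY so that the
// (iXSIZE/BLOCKX) x (iYSIZE/BLOCKY) grid launched below covers at least XSIZE x YSIZE threads.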
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cunn_OneVsAllNLLCriterion_updateGradInput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,input,target,nframe,dim,sizeaverage,positiveWeight);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cunn_OneVsAllNLLCriterion_updateGradInput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cunn_OneVsAllNLLCriterion_updateGradInput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4a884e54da29b3011784c8f9fc9246385648ade8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cunn_OneVsAllNLLCriterion_updateGradInput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
cudaMalloc(&gradInput, XSIZE*YSIZE);
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
int nframe = 1;
int dim = 2;
int sizeaverage = XSIZE*YSIZE;
float *positiveWeight = NULL;
cudaMalloc(&positiveWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cunn_OneVsAllNLLCriterion_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,input,target,nframe,dim,sizeaverage,positiveWeight);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cunn_OneVsAllNLLCriterion_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cunn_OneVsAllNLLCriterion_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
549745ac35dda1e83f45c53e57f9fc43dfd6eed4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#define BDIM 1024
__global__ void fast_transpose(double *a, double *b, int N) {
// buffer
__shared__ double buffer[BDIM];
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
// doing the transposition on the shared memory
buffer[threadIdx.y * blockDim.x + threadIdx.x] = a[y * N + x];
__syncthreads();
// copy back on global memory
b[x * N + y] = buffer[threadIdx.y * blockDim.x + threadIdx.x];
}
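// Note on fast_transpose above: each thread stages only its own element in "buffer" and still
// writes b[x * N + y] with a stride of N between neighbouring threads, so the shared memory does
// not make the global writes coalesced. A classic tiled transpose, sketched here under the
// assumption of square thread blocks (TILE == blockDim.x == blockDim.y) and a 2D tile
// __shared__ double tile[TILE][TILE], swaps the thread indices on the way out:
//   tile[threadIdx.y][threadIdx.x] = a[y * N + x];
//   __syncthreads();
//   b[(blockIdx.x * blockDim.x + threadIdx.y) * N + blockIdx.y * blockDim.y + threadIdx.x] =
//       tile[threadIdx.x][threadIdx.y];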
// naive transpose
__global__ void transpose(double *a, double *b, int N) {
int row = (blockIdx.x * blockDim.x + threadIdx.x) / N;
int col = (blockIdx.x * blockDim.x + threadIdx.x) % N;
b[col * N + row] = a[row * N + col];
}
// just randomly fill the matrix
void random_fill(double *mat, int N) {
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
mat[i * N + j] = (double)rand() / (double)RAND_MAX * 100.;
}
// Used for error-checking
void transpose_cpu(double *a, double *b, int N) {
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
b[j * N + i] = a[i * N + j];
}
// check if two matrices are equal
int is_equal(double *a, double *b, int N) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++)
if (b[i * N + j] != a[i * N + j])
return 0;
}
return 1;
}
void print_mat(double *a, int N) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%.1f ", a[i * N + j]);
}
printf("\n");
}
printf("\n");
}
int main(int argc, char *argv[]) {
double *a, *b, *c, *d; // host copies of a, b, c
const int N = 8192;
double *dev_a, *dev_b, *dev_c; // device copies of a, b, c
  int size = N * N * sizeof(double); // we need space for 512 MB
// Get the number of block dimensions (dim1*dim2 = number of threads)
if (argc < 3) {
printf("Insert the dimensions, first x, second y\n");
return -1;
}
// get block dimensions from command line
const int dim1 = atoi(argv[1]);
const int dim2 = atoi(argv[2]);
const int Nblocks = (N * N) / 1024;
if (dim1 * dim2 != BDIM) {
printf("Give rigth dimensions\n");
return -2;
}
dim3 grid, block;
block.x = dim1;
block.y = dim2;
grid.x = N / block.x;
grid.y = N / block.y;
// allocate device copies of a, b, c
hipMalloc((void **)&dev_a, size);
hipMalloc((void **)&dev_b, size);
a = (double *)malloc(size);
b = (double *)malloc(size);
d = (double *)malloc(size);
// fill the matrix with random numbers
random_fill(a, N);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
// cuda event for timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( fast_transpose), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, N);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(b, dev_b, size, hipMemcpyDeviceToHost);
// print_mat(b,N);
transpose_cpu(a, d, N);
int equal = is_equal(b, d, N);
if (equal)
printf("Correct fast\n");
else
printf("Uncorrect fast\n");
// Bandwith for reading from matrix a + writing on matrix b
printf("Time fast= %f\n", milliseconds);
printf("Bandwidth fast= %f\n", N * N * 2 * 8 / milliseconds / 1e6);
free(b);
// print_mat(d,N);
hipFree(dev_b);
c = (double *)malloc(size);
hipMalloc((void **)&dev_c, size);
hipEventRecord(start);
hipLaunchKernelGGL(( transpose), dim3(Nblocks), dim3(1024), 0, 0, dev_a, dev_c, N);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
equal = is_equal(c, d, N);
if (equal)
printf("Correct naive\n");
else
printf("Uncorrect naive\n");
printf("Time naive = %f\n", milliseconds);
printf("Bandwidth naive= %f\n", N * N * 2 * 8 / milliseconds / 1e6);
free(a);
free(c);
free(d);
hipFree(dev_a);
hipFree(dev_c);
return 0;
}
| 549745ac35dda1e83f45c53e57f9fc43dfd6eed4.cu | #include <math.h>
#include <stdio.h>
#define BDIM 1024
__global__ void fast_transpose(double *a, double *b, int N) {
// buffer
__shared__ double buffer[BDIM];
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
// doing the transposition on the shared memory
buffer[threadIdx.y * blockDim.x + threadIdx.x] = a[y * N + x];
__syncthreads();
// copy back on global memory
b[x * N + y] = buffer[threadIdx.y * blockDim.x + threadIdx.x];
}
// naive transpose
__global__ void transpose(double *a, double *b, int N) {
int row = (blockIdx.x * blockDim.x + threadIdx.x) / N;
int col = (blockIdx.x * blockDim.x + threadIdx.x) % N;
b[col * N + row] = a[row * N + col];
}
// just randomly fill the matrix
void random_fill(double *mat, int N) {
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
mat[i * N + j] = (double)rand() / (double)RAND_MAX * 100.;
}
// Used for error-checking
void transpose_cpu(double *a, double *b, int N) {
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
b[j * N + i] = a[i * N + j];
}
// check if two matrices are equal
int is_equal(double *a, double *b, int N) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++)
if (b[i * N + j] != a[i * N + j])
return 0;
}
return 1;
}
void print_mat(double *a, int N) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%.1f ", a[i * N + j]);
}
printf("\n");
}
printf("\n");
}
int main(int argc, char *argv[]) {
double *a, *b, *c, *d; // host copies of a, b, c
const int N = 8192;
double *dev_a, *dev_b, *dev_c; // device copies of a, b, c
  int size = N * N * sizeof(double); // we need space for 512 MB
// Get the number of block dimensions (dim1*dim2 = number of threads)
if (argc < 3) {
printf("Insert the dimensions, first x, second y\n");
return -1;
}
// get block dimensions from command line
const int dim1 = atoi(argv[1]);
const int dim2 = atoi(argv[2]);
const int Nblocks = (N * N) / 1024;
if (dim1 * dim2 != BDIM) {
printf("Give rigth dimensions\n");
return -2;
}
dim3 grid, block;
block.x = dim1;
block.y = dim2;
grid.x = N / block.x;
grid.y = N / block.y;
// allocate device copies of a, b, c
cudaMalloc((void **)&dev_a, size);
cudaMalloc((void **)&dev_b, size);
a = (double *)malloc(size);
b = (double *)malloc(size);
d = (double *)malloc(size);
// fill the matrix with random numbers
random_fill(a, N);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
// cuda event for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
fast_transpose<<<grid, block>>>(dev_a, dev_b, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
// print_mat(b,N);
transpose_cpu(a, d, N);
int equal = is_equal(b, d, N);
if (equal)
printf("Correct fast\n");
else
printf("Uncorrect fast\n");
// Bandwith for reading from matrix a + writing on matrix b
printf("Time fast= %f\n", milliseconds);
printf("Bandwidth fast= %f\n", N * N * 2 * 8 / milliseconds / 1e6);
free(b);
// print_mat(d,N);
cudaFree(dev_b);
c = (double *)malloc(size);
cudaMalloc((void **)&dev_c, size);
cudaEventRecord(start);
transpose<<<Nblocks, 1024>>>(dev_a, dev_c, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
equal = is_equal(c, d, N);
if (equal)
printf("Correct naive\n");
else
printf("Uncorrect naive\n");
printf("Time naive = %f\n", milliseconds);
printf("Bandwidth naive= %f\n", N * N * 2 * 8 / milliseconds / 1e6);
free(a);
free(c);
free(d);
cudaFree(dev_a);
cudaFree(dev_c);
return 0;
}
|
ca7b213c6b4ada36b13e48bc7d55955beeee766f.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
 * @file sm_app.cu
*
* @brief subgraph matching (SM) application
*/
#include <gunrock/app/sm/sm_app.cuh>
namespace gunrock {
namespace app {
namespace sm {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
GUARD_CU(parameters.Use<unsigned int>(
"num-subgraphs",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER,
0, "number of matched subgraphs", __FILE__, __LINE__));
return retval;
}
} // namespace sm
} // namespace app
} // namespace gunrock
/*
 * @brief Simple interface that takes in a graph in CSR format
 * @param[in] num_nodes Number of vertices in the input data graph
* @param[in] num_edges Number of edges in the input data graph
* @param[in] row_offsets CSR-formatted data graph input row offsets
* @param[in] col_indices CSR-formatted data graph input column indices
 * @param[in] num_query_nodes Number of vertices in the input query graph
* @param[in] num_query_edges Number of edges in the input query graph
* @param[in] query_row_offsets CSR-formatted graph input query row offsets
* @param[in] query_col_indices CSR-formatted graph input query column indices
* @param[in] num_runs Number of runs to perform SM
* @param[out] subgraphs Return number of subgraphs
* \return double Return accumulated elapsed times for all runs
*/
double sm(
const int num_nodes,
const int num_edges,
const int *row_offsets,
const int *col_indices,
const int num_query_nodes,
const int num_query_edges,
const int *query_row_offsets,
const int *query_col_indices,
const int num_runs,
int *subgraphs)
{
return sm_template(num_nodes, num_edges, row_offsets, col_indices,
num_query_nodes, num_query_edges, query_row_offsets,
query_col_indices, num_runs, subgraphs);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| ca7b213c6b4ada36b13e48bc7d55955beeee766f.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
 * @file sm_app.cu
*
* @brief subgraph matching (SM) application
*/
#include <gunrock/app/sm/sm_app.cuh>
namespace gunrock {
namespace app {
namespace sm {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
GUARD_CU(parameters.Use<unsigned int>(
"num-subgraphs",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER,
0, "number of matched subgraphs", __FILE__, __LINE__));
return retval;
}
} // namespace sm
} // namespace app
} // namespace gunrock
/*
 * @brief Simple interface that takes in a graph in CSR format
 * @param[in] num_nodes Number of vertices in the input data graph
* @param[in] num_edges Number of edges in the input data graph
* @param[in] row_offsets CSR-formatted data graph input row offsets
* @param[in] col_indices CSR-formatted data graph input column indices
 * @param[in] num_query_nodes Number of vertices in the input query graph
* @param[in] num_query_edges Number of edges in the input query graph
* @param[in] query_row_offsets CSR-formatted graph input query row offsets
* @param[in] query_col_indices CSR-formatted graph input query column indices
* @param[in] num_runs Number of runs to perform SM
* @param[out] subgraphs Return number of subgraphs
* \return double Return accumulated elapsed times for all runs
*/
double sm(
const int num_nodes,
const int num_edges,
const int *row_offsets,
const int *col_indices,
const int num_query_nodes,
const int num_query_edges,
const int *query_row_offsets,
const int *query_col_indices,
const int num_runs,
int *subgraphs)
{
return sm_template(num_nodes, num_edges, row_offsets, col_indices,
num_query_nodes, num_query_edges, query_row_offsets,
query_col_indices, num_runs, subgraphs);
}
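// Usage sketch (hypothetical host code; it assumes undirected graphs are stored as directed CSR
// with both edge directions present and that `subgraphs` receives a single match count):
//   int row_offsets[]   = {0, 2, 4, 6, 8};          // data graph: 4-cycle 0-1-2-3-0
//   int col_indices[]   = {1, 3, 0, 2, 1, 3, 0, 2};
//   int q_row_offsets[] = {0, 1, 2};                // query graph: a single edge 0-1
//   int q_col_indices[] = {1, 0};
//   int subgraphs = 0;
//   double elapsed = sm(4, 8, row_offsets, col_indices,
//                       2, 2, q_row_offsets, q_col_indices,
//                       1, &subgraphs);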
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
2f82231675057937f0e0391fa8b87aa55fffb01f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
int *a, *b; // host data
int *c, *c2; // results
// Cuda error checking - non mandatory
void cudaCheckError() {
hipError_t e = hipGetLastError();
if (e != hipSuccess) {
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__,
hipGetErrorString(e));
exit(0);
}
}
// GPU kernel
__global__ void vecAdd(int *A, int *B, int *C, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
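// Note: the kernel above has no `if (i < N)` bounds check; that is only safe here because every
// launch in main() uses an n that is an exact multiple of block_size (n runs from 1<<25 down to
// 1<<18 and block_size from 256 down to 32), so no thread indexes past the end of the arrays.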
// CPU function
void vecAdd_h(int *A1, int *B1, int *C1, int N) {
for (int i = 0; i < N; i++) C1[i] = A1[i] + B1[i];
}
int main(int argc, char **argv) {
// Vector
int n = 1 << 25;
// Block size and number
int block_size, block_no;
for (int j = 0; j < 8; ++j) {
block_size = 256; // threads per block
for (int k = 0; k < 4; ++k) {
// Number of blocks
int nBytes = n * sizeof(int);
// memory allocation
a = (int *)malloc(nBytes);
b = (int *)malloc(nBytes);
c = (int *)malloc(nBytes);
c2 = (int *)malloc(nBytes);
int *a_d, *b_d, *c_d;
block_no = n / block_size;
// Work definition
dim3 dimBlock(block_size, 1, 1);
dim3 dimGrid(block_no, 1, 1);
// Data filling
for (int i = 0; i < n; i++) a[i] = i, b[i] = i;
// GPU memory allocation
hipMalloc((void **)&a_d, n * sizeof(int));
hipMalloc((void **)&b_d, n * sizeof(int));
hipMalloc((void **)&c_d, n * sizeof(int));
hipMemcpy(a_d, a, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(b_d, b, n * sizeof(int), hipMemcpyHostToDevice);
clock_t start_d = clock();
hipLaunchKernelGGL(( vecAdd), dim3(block_no), dim3(block_size), 0, 0, a_d, b_d, c_d, n);
cudaCheckError();
// Wait for kernel call to finish
hipDeviceSynchronize();
clock_t end_d = clock();
clock_t start_h = clock();
vecAdd_h(a, b, c2, n);
clock_t end_h = clock();
// Time computing
double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
// Copying data back to host, this is a blocking call and will not start
// until all kernels are finished
hipMemcpy(c, c_d, n * sizeof(int), hipMemcpyDeviceToHost);
printf("n = %d \tblockSize = %d \tGPU time = %fs \tCPU time = %fs \tSpeedup =%f\n", n,
block_size, time_d, time_h, time_h/time_d);
// Free GPU memory
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
block_size>>=1;
}
n>>=1;
}
return 0;
} | 2f82231675057937f0e0391fa8b87aa55fffb01f.cu | #include <cuda.h>
#include <stdio.h>
int *a, *b; // host data
int *c, *c2; // results
// Cuda error checking - non mandatory
void cudaCheckError() {
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess) {
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__,
cudaGetErrorString(e));
exit(0);
}
}
// GPU kernel
__global__ void vecAdd(int *A, int *B, int *C, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// CPU function
void vecAdd_h(int *A1, int *B1, int *C1, int N) {
for (int i = 0; i < N; i++) C1[i] = A1[i] + B1[i];
}
int main(int argc, char **argv) {
// Vector
int n = 1 << 25;
// Block size and number
int block_size, block_no;
for (int j = 0; j < 8; ++j) {
block_size = 256; // threads per block
for (int k = 0; k < 4; ++k) {
// Number of blocks
int nBytes = n * sizeof(int);
// memory allocation
a = (int *)malloc(nBytes);
b = (int *)malloc(nBytes);
c = (int *)malloc(nBytes);
c2 = (int *)malloc(nBytes);
int *a_d, *b_d, *c_d;
block_no = n / block_size;
// Work definition
dim3 dimBlock(block_size, 1, 1);
dim3 dimGrid(block_no, 1, 1);
// Data filling
for (int i = 0; i < n; i++) a[i] = i, b[i] = i;
// GPU memory allocation
cudaMalloc((void **)&a_d, n * sizeof(int));
cudaMalloc((void **)&b_d, n * sizeof(int));
cudaMalloc((void **)&c_d, n * sizeof(int));
cudaMemcpy(a_d, a, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, n * sizeof(int), cudaMemcpyHostToDevice);
clock_t start_d = clock();
vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
cudaCheckError();
// Wait for kernel call to finish
cudaThreadSynchronize();
clock_t end_d = clock();
clock_t start_h = clock();
vecAdd_h(a, b, c2, n);
clock_t end_h = clock();
// Time computing
double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
// Copying data back to host, this is a blocking call and will not start
// until all kernels are finished
cudaMemcpy(c, c_d, n * sizeof(int), cudaMemcpyDeviceToHost);
printf("n = %d \tblockSize = %d \tGPU time = %fs \tCPU time = %fs \tSpeedup =%f\n", n,
block_size, time_d, time_h, time_h/time_d);
// Free GPU memory
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
block_size>>=1;
}
n>>=1;
}
return 0;
} |
519ae12c69dbe3e1f1b4f83bae71251c4f866721.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef CNN_GPU_KERNELS_H
#define CNN_GPU_KERNELS_H
#include "cnn/gpu-kernels.h"
#include "cnn/cuda.h"
#include "macros.h"
namespace cnn {
namespace gpu {
// adapted from NVIDIA example
__global__ void ker_l2_norm_reducer(int n, const cnn::real *x0, cnn::real* res, bool sq, bool acc) {
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256) {
const cnn::real d = x0[pos];
sum += sq ? d * d : d;
}
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0) {
if (acc) res[0] += buf[0]; else res[0] = buf[0];
}
}
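// Note: the pos loops above stride by a fixed 256 independent of blockIdx, so every block would
// recompute the full sum and thread 0 of each block writes res[0]; this reducer (and the similar
// kernels below) therefore appears to be written for single-block launches.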
// A kernel to calculate the dot product between two arrays
__global__ void ker_dotproduct(int n, const cnn::real* x, const cnn::real* y, cnn::real* z) {
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256)
sum += x[pos] * y[pos];
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0)
z[0] = buf[0];
}
// adapted from NVIDIA example
__global__ void ker_sqeucdist(int n, const cnn::real *x0, const cnn::real *x1, cnn::real* res) {
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256) {
const cnn::real d = x0[pos] - x1[pos];
sum += d * d;
}
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0) res[0] = buf[0];
}
/// compute the gradient scaling factor for clipping:
/// sums the entries of dense_param_grad_norm and sparse_param_grad_norm, takes the square root,
/// and sets *gscale = clip_threshold * samples / norm when the norm exceeds clip_threshold * samples (otherwise 1.0)
__global__ void ker_gradient_scaling(int n, const cnn::real *dense_param_grad_norm,
int m, const cnn::real *sparse_param_grad_norm,
cnn::real clip_threshold, int samples,
cnn::real* gscale)
{
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256) {
sum += dense_param_grad_norm[pos];
}
for (int pos = i; pos < m; pos += 256) {
sum += sparse_param_grad_norm[pos];
}
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0){
*gscale = 1.0;
buf[0] = (sizeof(cnn::real) == sizeof(float)) ? sqrtf(buf[0]) : sqrt(buf[0]);
if (buf[0] > clip_threshold * samples) {
*gscale = (clip_threshold * samples) / buf[0];
}
}
}
} // namespace gpu
} // namespace cnn
#endif
| 519ae12c69dbe3e1f1b4f83bae71251c4f866721.cu | #ifndef CNN_GPU_KERNELS_H
#define CNN_GPU_KERNELS_H
#include "cnn/gpu-kernels.h"
#include "cnn/cuda.h"
#include "macros.h"
namespace cnn {
namespace gpu {
// adapted from NVIDIA example
__global__ void ker_l2_norm_reducer(int n, const cnn::real *x0, cnn::real* res, bool sq, bool acc) {
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256) {
const cnn::real d = x0[pos];
sum += sq ? d * d : d;
}
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0) {
if (acc) res[0] += buf[0]; else res[0] = buf[0];
}
}
// A kernel to calculate the dot product between two arrays
__global__ void ker_dotproduct(int n, const cnn::real* x, const cnn::real* y, cnn::real* z) {
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256)
sum += x[pos] * y[pos];
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0)
z[0] = buf[0];
}
// adapted from NVIDIA example
__global__ void ker_sqeucdist(int n, const cnn::real *x0, const cnn::real *x1, cnn::real* res) {
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256) {
const cnn::real d = x0[pos] - x1[pos];
sum += d * d;
}
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0) res[0] = buf[0];
}
/// compute gradient clipping
/// gscale = 1, or clip_threshold * samples / ||grad|| when the accumulated gradient norm exceeds clip_threshold * samples
__global__ void ker_gradient_scaling(int n, const cnn::real *dense_param_grad_norm,
int m, const cnn::real *sparse_param_grad_norm,
cnn::real clip_threshold, int samples,
cnn::real* gscale)
{
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256) {
sum += dense_param_grad_norm[pos];
}
for (int pos = i; pos < m; pos += 256) {
sum += sparse_param_grad_norm[pos];
}
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0){
*gscale = 1.0;
buf[0] = (sizeof(cnn::real) == sizeof(float)) ? sqrtf(buf[0]) : sqrt(buf[0]);
if (buf[0] > clip_threshold * samples) {
*gscale = (clip_threshold * samples) / buf[0];
}
}
}
} // namespace gpu
} // namespace cnn
#endif
|
ede7d474669043f9938df1bb08e4a14e06b1dbb2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ker_gkylCartFieldScale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
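// Benchmark driver: for every matrix size in matrices_ and every block shape in
// blocks_, main() pads XSIZE/YSIZE up to multiples of the block, launches
// ker_gkylCartFieldScale once plus 10 warm-up iterations, then times 1000 more
// launches with std::chrono and prints [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// The kernel itself lives in the included ker_gkylCartFieldScale.cu; judging only
// from the call below, its parameters are (unsigned s, unsigned nv, double fact,
// double *out).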
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned s = 1;
unsigned nv = 1;
double fact = 1;
double *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
ker_gkylCartFieldScale), dim3(gridBlock),dim3(threadBlock), 0, 0, s,nv,fact,out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
ker_gkylCartFieldScale), dim3(gridBlock),dim3(threadBlock), 0, 0, s,nv,fact,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
ker_gkylCartFieldScale), dim3(gridBlock),dim3(threadBlock), 0, 0, s,nv,fact,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ede7d474669043f9938df1bb08e4a14e06b1dbb2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ker_gkylCartFieldScale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned s = 1;
unsigned nv = 1;
double fact = 1;
double *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ker_gkylCartFieldScale<<<gridBlock,threadBlock>>>(s,nv,fact,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ker_gkylCartFieldScale<<<gridBlock,threadBlock>>>(s,nv,fact,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ker_gkylCartFieldScale<<<gridBlock,threadBlock>>>(s,nv,fact,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
823043cd6fa4fd03fe5202db4cc0244d725c85a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai
*/
#include <hipcub/hipcub.hpp>
#include <vector>
#include "caffe2/core/common.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/deform_conv_op.h"
#include "caffe2/operators/deform_conv_op_impl.h"
namespace caffe2 {
typedef int64_t index_t;
typedef std::vector<int64_t> TShape;
template <typename DType>
__device__ DType deformable_im2col_bilinear(
const DType* bottom_data,
const int data_width,
const int height,
const int width,
DType h,
DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (DType)h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (DType)w_low;
} else {
w_high = w_low + 1;
}
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = bottom_data[h_low * data_width + w_low];
DType v2 = bottom_data[h_low * data_width + w_high];
DType v3 = bottom_data[h_high * data_width + w_low];
DType v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
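// Bilinear sampling note: with lh = h - h_low and lw = w - w_low the four corner
// weights above are the areas of the opposite sub-rectangles,
//   val = (1-lh)(1-lw)*v1 + (1-lh)*lw*v2 + lh*(1-lw)*v3 + lh*lw*v4,
// so sampling exactly on a grid point (lh = lw = 0) returns v1 unchanged, and a
// cell-centre sample (lh = lw = 0.5) of v1..v4 = 1,2,3,4 gives (1+2+3+4)/4 = 2.5.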
template <typename DType>
__device__ DType get_gradient_weight(
DType argmax_h,
DType argmax_w,
const int h,
const int w,
const int height,
const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
// empty
return 0;
}
argmax_h = max(argmax_h, (DType)0.0f);
argmax_w = max(argmax_w, (DType)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename DType>
__device__ DType get_coordinate_weight(
DType argmax_h,
DType argmax_w,
const int height,
const int width,
const DType* im_data,
const int data_width,
const int bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
// empty
return 0;
}
if (argmax_h < 0)
argmax_h = 0;
if (argmax_w < 0)
argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_low * data_width + argmax_w_low];
weight += -1 * (argmax_w - argmax_w_low) *
im_data[argmax_h_low * data_width + argmax_w_high];
weight += (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_w - argmax_w_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_low];
weight += (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_high];
weight += -1 * (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
/*!
* \brief deformable_im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(
const int n,
const DType* data_im,
const DType* data_offset,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int height_col,
const int width_col,
DType* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType* data_col_ptr =
data_col + (c_col * height_col + h_col) * width_col + w_col;
const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const DType* data_offset_ptr = data_offset +
deformable_group_index * 2 * kernel_h * kernel_w * height_col *
width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const DType map_h = i * dilation_h + offset_h;
const DType map_w = j * dilation_w + offset_w;
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(
data_im_ptr, width, cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
/*!\brief
 * gpu function of deformable_im2col algorithm
* \param s device stream
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape (#channels, output_im_height,
* output_im_width, ...) \param kernel_shape kernel filter shape \param pad pad
* shape \param stride stride shape \param dilation dilation shape \param
* deformable_group #offset group that deformable convolution use \param
* data_col column buffer pointer
*/
template <typename DType, typename Context>
void DeformConvOpBase<DType, Context>::DeformableIm2col(
const DType* data_im,
const DType* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
DType* data_col) {
CHECK_LT(2, CAFFE_CUDA_NUM_THREADS);
CAFFE_ENFORCE_EQ(pad_t(), pad_b());
CAFFE_ENFORCE_EQ(pad_l(), pad_r());
const int pad_h = pad_t();
const int pad_w = pad_l();
index_t channel_per_deformable_group = im_shape[1] / deformable_group_;
index_t num_kernels = im_shape[1] * size_from_dim_(1, col_shape);
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<DType>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
num_kernels,
data_im,
data_offset,
im_shape[2],
im_shape[3],
kernel_h(),
kernel_w(),
pad_h,
pad_w,
stride_h(),
stride_w(),
dilation_h(),
dilation_w(),
channel_per_deformable_group,
col_shape[1],
col_shape[2],
data_col);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
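// A small worked example of the launch arithmetic above (hypothetical sizes):
// with im_shape = {N=1, C=4, H=8, W=8}, a 3x3 kernel, pad 1, stride 1, dilation 1
// and deformable_group_ = 1, the output plane stays 8x8, so col_shape[1] =
// col_shape[2] = 8 and num_kernels = C * H_out * W_out = 4 * 64 = 256 -- one
// thread per (input channel, output row, output column) triple, each writing the
// 3*3 = 9 column-buffer entries for that channel and location.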
/*!
* \brief deformable_col2im gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im()
* instead;
*/
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(
const int n,
const DType* data_col,
const DType* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int height_col,
const int width_col,
DType* grad_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col) % kernel_w;
const int i = (index / width_col / height_col / kernel_w) % kernel_h;
const int c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType* data_offset_ptr = data_offset +
deformable_group_index * 2 * kernel_h * kernel_w * height_col *
width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width &&
c10::hip::compat::abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
c10::hip::compat::abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
(c * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(
cur_inv_h_data,
cur_inv_w_data,
cur_h + dy,
cur_w + dx,
height,
width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
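// The dy/dx loop above scans a +/-2 window around the fractional sampling
// location, but the abs(...) < 1 test together with get_gradient_weight keeps
// only the four cells that actually enclose it; atomicAdd is still needed because
// many column entries can scatter their gradient into the same input pixel.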
/*!\brief
* gpu function of deformable_col2im algorithm
* \param s device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_im pointer of a image (C, H, W,...) in the image batch
*/
template <typename DType, typename Context>
void DeformConvOpBase<DType, Context>::DeformableCol2im(
const DType* data_col,
const DType* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
DType* grad_im) {
CAFFE_ENFORCE_EQ(pad_t(), pad_b());
CAFFE_ENFORCE_EQ(pad_l(), pad_r());
const int pad_h = pad_t();
const int pad_w = pad_l();
index_t im_size = size_from_dim_(1, im_shape);
index_t channel_per_deformable_group = im_shape[1] / deformable_group_;
index_t num_kernels = size_from_dim_(0, col_shape);
// num_axes should be smaller than block size
CHECK_LT(2, CAFFE_CUDA_NUM_THREADS);
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<DType>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
num_kernels,
data_col,
data_offset,
im_shape[1],
im_shape[2],
im_shape[3],
kernel_h(),
kernel_w(),
pad_h,
pad_w,
stride_h(),
stride_w(),
dilation_h(),
dilation_w(),
channel_per_deformable_group,
col_shape[1],
col_shape[2],
grad_im);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
/*!
* \brief deformable_col2im_coord gpu kernel.
* \brief DO NOT call this directly. Use wrapper function
* deformable_col2im_coord() instead;
*/
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(
const int n,
const DType* data_col,
const DType* data_im,
const DType* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int height_col,
const int width_col,
DType* grad_offset) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = index / width_col / height_col;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType* data_col_ptr = data_col +
deformable_group_index * channel_per_deformable_group * width_col *
height_col;
const DType* data_im_ptr = data_im +
deformable_group_index * channel_per_deformable_group / kernel_h /
kernel_w * height * width;
const DType* data_offset_ptr = data_offset +
deformable_group_index * 2 * kernel_h * kernel_w * height_col *
width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos = ((col_c * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col) % kernel_w;
int i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const DType weight = get_coordinate_weight(
inv_h,
inv_w,
height,
width,
data_im_ptr + cnt * height * width,
width,
bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!\brief
* gpu function of deformable_col2im_coord algorithm
* \param s device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_offset pointer of the offset (C, H, W,...) in the offset batch
*/
template <typename DType, typename Context>
void DeformConvOpBase<DType, Context>::DeformableCol2imCoord(
const DType* data_col,
const DType* data_im,
const DType* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
DType* grad_offset) {
CAFFE_ENFORCE_EQ(pad_t(), pad_b());
CAFFE_ENFORCE_EQ(pad_l(), pad_r());
const int pad_h = pad_t();
const int pad_w = pad_l();
index_t num_kernels = col_shape[1] * col_shape[2] * 2 * kernel_h() *
kernel_w() * deformable_group_;
index_t channel_per_deformable_group = col_shape[0] / deformable_group_;
// num_axes should be smaller than block size
CHECK_LT(2, CAFFE_CUDA_NUM_THREADS);
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel<DType>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
num_kernels,
data_col,
data_im,
data_offset,
im_shape[1],
im_shape[2],
im_shape[3],
kernel_h(),
kernel_w(),
pad_h,
pad_w,
stride_h(),
stride_w(),
dilation_h(),
dilation_w(),
channel_per_deformable_group,
col_shape[1],
col_shape[2],
grad_offset);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(DeformConv, DeformConvOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
DeformConvGradient,
DeformConvGradientOp<float, CUDAContext>);
} // namespace caffe2
| 823043cd6fa4fd03fe5202db4cc0244d725c85a0.cu | /*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai
*/
#include <cub/block/block_reduce.cuh>
#include <vector>
#include "caffe2/core/common.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/deform_conv_op.h"
#include "caffe2/operators/deform_conv_op_impl.h"
namespace caffe2 {
typedef int64_t index_t;
typedef std::vector<int64_t> TShape;
template <typename DType>
__device__ DType deformable_im2col_bilinear(
const DType* bottom_data,
const int data_width,
const int height,
const int width,
DType h,
DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (DType)h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (DType)w_low;
} else {
w_high = w_low + 1;
}
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = bottom_data[h_low * data_width + w_low];
DType v2 = bottom_data[h_low * data_width + w_high];
DType v3 = bottom_data[h_high * data_width + w_low];
DType v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename DType>
__device__ DType get_gradient_weight(
DType argmax_h,
DType argmax_w,
const int h,
const int w,
const int height,
const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
// empty
return 0;
}
argmax_h = max(argmax_h, (DType)0.0f);
argmax_w = max(argmax_w, (DType)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
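// get_gradient_weight returns the bilinear coefficient with which the integer
// grid point (h, w) contributed to the sample taken at (argmax_h, argmax_w),
// i.e. d(sample)/d(input value). It is non-zero only for the four corners
// enclosing the sampling location, which is what lets deformable_col2im visit
// just a small neighbourhood when scattering gradients back to the image.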
template <typename DType>
__device__ DType get_coordinate_weight(
DType argmax_h,
DType argmax_w,
const int height,
const int width,
const DType* im_data,
const int data_width,
const int bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
// empty
return 0;
}
if (argmax_h < 0)
argmax_h = 0;
if (argmax_w < 0)
argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_low * data_width + argmax_w_low];
weight += -1 * (argmax_w - argmax_w_low) *
im_data[argmax_h_low * data_width + argmax_w_high];
weight += (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_w - argmax_w_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_low];
weight += (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_high];
weight += -1 * (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
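// get_coordinate_weight is the companion derivative with respect to the sampling
// coordinate itself: bp_dir == 0 differentiates the bilinear sample along h and
// bp_dir == 1 along w. deformable_col2im_coord_gpu_kernel below multiplies it by
// the incoming column gradient to accumulate grad_offset.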
/*!
* \brief deformable_im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(
const int n,
const DType* data_im,
const DType* data_offset,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int height_col,
const int width_col,
DType* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType* data_col_ptr =
data_col + (c_col * height_col + h_col) * width_col + w_col;
const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const DType* data_offset_ptr = data_offset +
deformable_group_index * 2 * kernel_h * kernel_w * height_col *
width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const DType map_h = i * dilation_h + offset_h;
const DType map_w = j * dilation_w + offset_w;
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(
data_im_ptr, width, cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
/*!\brief
 * gpu function of deformable_im2col algorithm
* \param s device stream
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape (#channels, output_im_height,
* output_im_width, ...) \param kernel_shape kernel filter shape \param pad pad
* shape \param stride stride shape \param dilation dilation shape \param
* deformable_group #offset group that deformable convolution use \param
* data_col column buffer pointer
*/
template <typename DType, typename Context>
void DeformConvOpBase<DType, Context>::DeformableIm2col(
const DType* data_im,
const DType* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
DType* data_col) {
CHECK_LT(2, CAFFE_CUDA_NUM_THREADS);
CAFFE_ENFORCE_EQ(pad_t(), pad_b());
CAFFE_ENFORCE_EQ(pad_l(), pad_r());
const int pad_h = pad_t();
const int pad_w = pad_l();
index_t channel_per_deformable_group = im_shape[1] / deformable_group_;
index_t num_kernels = im_shape[1] * size_from_dim_(1, col_shape);
deformable_im2col_gpu_kernel<DType>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
num_kernels,
data_im,
data_offset,
im_shape[2],
im_shape[3],
kernel_h(),
kernel_w(),
pad_h,
pad_w,
stride_h(),
stride_w(),
dilation_h(),
dilation_w(),
channel_per_deformable_group,
col_shape[1],
col_shape[2],
data_col);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
/*!
* \brief deformable_col2im gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im()
* instead;
*/
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(
const int n,
const DType* data_col,
const DType* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int height_col,
const int width_col,
DType* grad_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col) % kernel_w;
const int i = (index / width_col / height_col / kernel_w) % kernel_h;
const int c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType* data_offset_ptr = data_offset +
deformable_group_index * 2 * kernel_h * kernel_w * height_col *
width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width &&
c10::cuda::compat::abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
c10::cuda::compat::abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
(c * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(
cur_inv_h_data,
cur_inv_w_data,
cur_h + dy,
cur_w + dx,
height,
width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
/*!\brief
* gpu function of deformable_col2im algorithm
* \param s device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_im pointer of a image (C, H, W,...) in the image batch
*/
template <typename DType, typename Context>
void DeformConvOpBase<DType, Context>::DeformableCol2im(
const DType* data_col,
const DType* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
DType* grad_im) {
CAFFE_ENFORCE_EQ(pad_t(), pad_b());
CAFFE_ENFORCE_EQ(pad_l(), pad_r());
const int pad_h = pad_t();
const int pad_w = pad_l();
index_t im_size = size_from_dim_(1, im_shape);
index_t channel_per_deformable_group = im_shape[1] / deformable_group_;
index_t num_kernels = size_from_dim_(0, col_shape);
// num_axes should be smaller than block size
CHECK_LT(2, CAFFE_CUDA_NUM_THREADS);
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_gpu_kernel<DType>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
num_kernels,
data_col,
data_offset,
im_shape[1],
im_shape[2],
im_shape[3],
kernel_h(),
kernel_w(),
pad_h,
pad_w,
stride_h(),
stride_w(),
dilation_h(),
dilation_w(),
channel_per_deformable_group,
col_shape[1],
col_shape[2],
grad_im);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
/*!
* \brief deformable_col2im_coord gpu kernel.
* \brief DO NOT call this directly. Use wrapper function
* deformable_col2im_coord() instead;
*/
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(
const int n,
const DType* data_col,
const DType* data_im,
const DType* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int height_col,
const int width_col,
DType* grad_offset) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = index / width_col / height_col;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType* data_col_ptr = data_col +
deformable_group_index * channel_per_deformable_group * width_col *
height_col;
const DType* data_im_ptr = data_im +
deformable_group_index * channel_per_deformable_group / kernel_h /
kernel_w * height * width;
const DType* data_offset_ptr = data_offset +
deformable_group_index * 2 * kernel_h * kernel_w * height_col *
width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos = ((col_c * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col) % kernel_w;
int i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const DType weight = get_coordinate_weight(
inv_h,
inv_w,
height,
width,
data_im_ptr + cnt * height * width,
width,
bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!\brief
* gpu function of deformable_col2im_coord algorithm
* \param s device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_offset pointer of the offset (C, H, W,...) in the offset batch
*/
template <typename DType, typename Context>
void DeformConvOpBase<DType, Context>::DeformableCol2imCoord(
const DType* data_col,
const DType* data_im,
const DType* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
DType* grad_offset) {
CAFFE_ENFORCE_EQ(pad_t(), pad_b());
CAFFE_ENFORCE_EQ(pad_l(), pad_r());
const int pad_h = pad_t();
const int pad_w = pad_l();
index_t num_kernels = col_shape[1] * col_shape[2] * 2 * kernel_h() *
kernel_w() * deformable_group_;
index_t channel_per_deformable_group = col_shape[0] / deformable_group_;
// num_axes should be smaller than block size
CHECK_LT(2, CAFFE_CUDA_NUM_THREADS);
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_coord_gpu_kernel<DType>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
num_kernels,
data_col,
data_im,
data_offset,
im_shape[1],
im_shape[2],
im_shape[3],
kernel_h(),
kernel_w(),
pad_h,
pad_w,
stride_h(),
stride_w(),
dilation_h(),
dilation_w(),
channel_per_deformable_group,
col_shape[1],
col_shape[2],
grad_offset);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(DeformConv, DeformConvOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
DeformConvGradient,
DeformConvGradientOp<float, CUDAContext>);
} // namespace caffe2
|
3b6ac4e6b8b654841fd29c6b071a32214ea11700.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_matrix_mult(int *a, int *b, int *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if (col < k && row < m)
{
for (int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
} | 3b6ac4e6b8b654841fd29c6b071a32214ea11700.cu | #include "includes.h"
__global__ void gpu_matrix_mult(int *a, int *b, int *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if (col < k && row < m)
{
for (int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
} |
010ffe289e1538791166f68eb5aeeb11a08e6614.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
static const int WORK_SIZE = 256;
#define NUM_ELEMENTS 8192
typedef struct {
unsigned int a;
unsigned int b;
unsigned int c;
unsigned int d;
} INTERLEAVED_T;
typedef INTERLEAVED_T INTERLEAVED_ARRAY_T[NUM_ELEMENTS];
typedef unsigned int ARRAY_MEMBER_T[NUM_ELEMENTS];
typedef struct {
ARRAY_MEMBER_T a;
ARRAY_MEMBER_T b;
ARRAY_MEMBER_T c;
ARRAY_MEMBER_T d;
} NON_INTERLEAVED_T;
__host__ hipEvent_t get_time(void)
{
hipEvent_t time;
hipEventCreate(&time);
hipEventRecord(time);
return time;
}
__host__ float add_test_non_interleaved_cpu(
NON_INTERLEAVED_T host_dest_ptr,
NON_INTERLEAVED_T const host_src_ptr, const unsigned int iter,
const unsigned int num_elements) {
hipEvent_t start_time = get_time();
for (unsigned int tid = 0; tid < num_elements; tid++) {
for (unsigned int i = 0; i < iter; i++) {
host_dest_ptr.a[tid] += host_src_ptr.a[tid];
host_dest_ptr.b[tid] += host_src_ptr.b[tid];
host_dest_ptr.c[tid] += host_src_ptr.c[tid];
host_dest_ptr.d[tid] += host_src_ptr.d[tid];
}
}
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
float delta = 0;
hipEventElapsedTime(&delta, start_time, end_time);
return delta;
}
__host__ float add_test_interleaved_cpu(INTERLEAVED_T * const host_dest_ptr,
const INTERLEAVED_T * const host_src_ptr, const unsigned int iter,
const unsigned int num_elements) {
hipEvent_t start_time = get_time();
for (unsigned int tid = 0; tid < num_elements; tid++) {
// printf("tid: %u ", tid);
for (unsigned int i = 0; i < iter; i++) {
// printf("iteration: %un", iter);
host_dest_ptr[tid].a += host_src_ptr[tid].a;
host_dest_ptr[tid].b += host_src_ptr[tid].b;
host_dest_ptr[tid].c += host_src_ptr[tid].c;
host_dest_ptr[tid].d += host_src_ptr[tid].d;
}
}
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
float delta = 0;
hipEventElapsedTime(&delta, start_time, end_time);
return delta;
}
__global__ void add_kernel_interleaved(INTERLEAVED_T * const dest_ptr,
const INTERLEAVED_T * const src_ptr, const unsigned int iter,
const unsigned int num_elements) {
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid < num_elements)
{
for(unsigned int i=0; i<iter; i++)
{
dest_ptr[tid].a += src_ptr[tid].a;
dest_ptr[tid].b += src_ptr[tid].b;
dest_ptr[tid].c += src_ptr[tid].c;
dest_ptr[tid].d += src_ptr[tid].d;
}
}
}
__global__ void add_kernel_non_interleaved(
NON_INTERLEAVED_T * const dest_ptr,
NON_INTERLEAVED_T * const src_ptr, const unsigned int iter,
const unsigned int num_elements) {
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid < num_elements)
{
for (unsigned int i = 0; i < iter; i++)
{
dest_ptr->a[tid] += src_ptr->a[tid];
dest_ptr->b[tid] += src_ptr->b[tid];
dest_ptr->c[tid] += src_ptr->c[tid];
dest_ptr->d[tid] += src_ptr->d[tid];
}
}
}
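// Why both kernels exist: in add_kernel_interleaved consecutive threads read
// whole INTERLEAVED_T records that sit 16 bytes apart, so each warp issues wide,
// strided accesses; in add_kernel_non_interleaved consecutive threads read
// consecutive unsigned ints from the same member array, which coalesces into far
// fewer memory transactions. The host-side timing below is meant to expose that
// difference between the AoS and SoA layouts.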
__host__ float add_test_non_interleaved(NON_INTERLEAVED_T host_dest_ptr,
const NON_INTERLEAVED_T host_src_ptr, const unsigned int iter,
const unsigned int num_elements)
{
const unsigned int num_threads = 256;
const unsigned int num_blocks = (num_elements + (num_threads-1)) / num_threads;
const size_t num_bytes = sizeof(NON_INTERLEAVED_T);
NON_INTERLEAVED_T * device_dest_ptr;
NON_INTERLEAVED_T * device_src_ptr;
hipMalloc((void **) &device_src_ptr, num_bytes);
hipMalloc((void **) &device_dest_ptr, num_bytes);
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start,0);
hipEventCreate(&kernel_stop,0);
hipStream_t test_stream;
hipStreamCreate(&test_stream);
hipMemcpy(device_src_ptr, &host_src_ptr, num_bytes,hipMemcpyHostToDevice);
hipEventRecord(kernel_start, 0);
hipLaunchKernelGGL(( add_kernel_non_interleaved), dim3(num_blocks),dim3(num_threads), 0, 0, device_dest_ptr, device_src_ptr, iter, num_elements);
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
float delta = 0.0F;
hipEventElapsedTime(&delta, kernel_start, kernel_stop);
hipFree(device_src_ptr);
hipFree(device_dest_ptr);
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_stop);
hipStreamDestroy(test_stream);
return delta;
}
__host__ float add_test_interleaved(INTERLEAVED_T * const host_dest_ptr,
const INTERLEAVED_T * const host_src_ptr, const unsigned int iter,
const unsigned int num_elements)
{
const unsigned int num_threads = 256;
const unsigned int num_blocks = (num_elements + (num_threads-1)) / num_threads;
const size_t num_bytes = (sizeof(INTERLEAVED_T) * num_elements);
INTERLEAVED_T * device_dest_ptr;
INTERLEAVED_T * device_src_ptr;
hipMalloc((void **) &device_src_ptr, num_bytes);
hipMalloc((void **) &device_dest_ptr, num_bytes);
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start,0);
hipEventCreate(&kernel_stop,0);
hipStream_t test_stream;
hipStreamCreate(&test_stream);
hipMemcpy(device_src_ptr, host_src_ptr, num_bytes,hipMemcpyHostToDevice);
hipEventRecord(kernel_start, 0);
hipLaunchKernelGGL(( add_kernel_interleaved), dim3(num_blocks),dim3(num_threads), 0, 0, device_dest_ptr, device_src_ptr, iter, num_elements);
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
float delta = 0.0F;
hipEventElapsedTime(&delta, kernel_start, kernel_stop);
hipFree(device_src_ptr);
hipFree(device_dest_ptr);
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_stop);
hipStreamDestroy(test_stream);
return delta;
}
__host__ float select_samples_cpu(unsigned int * const sample_data,
const unsigned int sample_interval,
const unsigned int num_elements,
const unsigned int * const src_data)
{
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start,0);
hipEventCreate(&kernel_stop,0);
hipEventRecord(kernel_start, 0);
unsigned int sample_idx = 0;
for(unsigned int src_idx=0; src_idx<num_elements;src_idx+=sample_interval)
{
sample_data[sample_idx] = src_data[src_idx];
sample_idx++;
}
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
float delta = 0.0F;
hipEventElapsedTime(&delta, kernel_start, kernel_stop);
return delta;
}
__global__ void select_samples_gpu_kernel(unsigned int * const sample_data,
const unsigned int sample_interval,
const unsigned int * const src_data)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
sample_data[tid] = src_data[tid*sample_interval];
}
__host__ float select_samples_gpu(unsigned int * const sample_data,
const unsigned int sample_interval,
const unsigned int num_elements,
const unsigned int num_samples,
const unsigned int * const src_data,
const unsigned int num_threads_per_block,
const char * prefix)
{
const unsigned int num_blocks = num_samples / num_threads_per_block;
assert((num_blocks * num_threads_per_block) == num_samples);
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start,0);
hipEventCreate(&kernel_stop,0);
hipEventRecord(kernel_start, 0);
hipLaunchKernelGGL(( select_samples_gpu_kernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, sample_data, sample_interval, src_data);
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
float delta = 0.0F;
hipEventElapsedTime(&delta, kernel_start, kernel_stop);
return delta;
}
// simple comparison function found at http://www.tutorialspoint.com/c_standard_library/c_function_qsort.htm
int compare_func (const void * a, const void * b)
{
return ( *(int*)a - *(int*)b );
}
__host__ float sort_samples_cpu(unsigned int * const sample_data,
const unsigned int num_samples)
{
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start,0);
hipEventCreate(&kernel_stop,0);
hipEventRecord(kernel_start, 0);
qsort(sample_data, num_samples, sizeof(unsigned int), &compare_func);
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
float delta = 0.0F;
hipEventElapsedTime(&delta, kernel_start, kernel_stop);
return delta;
}
__host__ __device__ unsigned int bin_search3(const unsigned int * const src_data,
const unsigned int search_value,
const unsigned int num_elements)
{
// Take the middle of the two sections
unsigned int size = (num_elements >> 1);
unsigned int start_idx = 0;
bool found = false;
do
{
const unsigned int src_idx = (start_idx+size);
const unsigned int test_value = src_data[src_idx];
if(test_value == search_value)
{
found = true;
}
else if(search_value > test_value)
{
start_idx = (start_idx+size);
}
if(found == false)
{
size >>= 1;
}
}
while((found == false) && (size != 0));
return (start_idx + size);
}
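// Worked example for bin_search3 on the sorted samples {10,20,30,40,50,60,70,80}
// (num_elements = 8) with search_value = 55:
//   size 4, start 0: test src[4] = 50 -> 55 > 50, start = 4, size = 2
//   size 2, start 4: test src[6] = 70 -> 55 < 70, size = 1
//   size 1, start 4: test src[5] = 60 -> 55 < 60, size = 0
// and the function returns start + size = 4, the bin whose sample (50) is the
// largest one not exceeding the key -- which is how count_bins_* buckets data.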
__host__ float count_bins_cpu(const unsigned int num_samples,
const unsigned int num_elements,
const unsigned int * const src_data,
const unsigned int * const sample_data,
unsigned int * const bin_count)
{
hipEvent_t start_time = get_time();
for(unsigned int src_idx = 0; src_idx<num_elements;src_idx++)
{
const unsigned int data_to_find = src_data[src_idx];
const unsigned int idx = bin_search3(sample_data,data_to_find,num_samples);
bin_count[idx]++;
}
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
float delta = 0;
hipEventElapsedTime(&delta, start_time, end_time);
return delta;
}
//Single data point atomic add to gmem
__global__ void count_bins_gpu_kernel5(const unsigned int num_samples,
const unsigned int num_elements,
const unsigned int * const src_data,
const unsigned int * const sample_data,
unsigned int * const bin_count)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int data_to_find = src_data[tid];
const unsigned int idx = bin_search3(sample_data, data_to_find, num_samples);
atomicAdd(&bin_count[idx],1);
}
__host__ float count_bins_gpu(const unsigned int num_samples,
const unsigned int * const src_data,
const unsigned int * const sample_data,
unsigned int * const bin_count,
const unsigned int num_threads,
const char * prefix)
{
const unsigned int num_blocks = num_samples / num_threads;
hipEvent_t start_time = get_time();
hipLaunchKernelGGL(( count_bins_gpu_kernel5), dim3(num_blocks),dim3(num_threads), 0, 0, num_samples, NUM_ELEMENTS, src_data, sample_data, bin_count);
// cuda_error_check(prefix, "Error invoking count_bins_gpu_kernel");
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
float delta = 0;
hipEventElapsedTime(&delta, start_time, end_time);
return delta;
}
__host__ float calc_bin_idx_cpu(const unsigned int num_samples,
const unsigned int * const bin_count,
unsigned int * const dest_bin_idx)
{
hipEvent_t start_time = get_time();
unsigned int prefix_sum = 0;
for(unsigned int i = 0; i<num_samples;i++)
{
dest_bin_idx[i] = prefix_sum;
prefix_sum += bin_count[i];
}
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
float delta = 0;
hipEventElapsedTime(&delta, start_time, end_time);
return delta;
}
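// calc_bin_idx_cpu is an exclusive prefix sum: bin_count = {3, 0, 5, 2} produces
// dest_bin_idx = {0, 3, 3, 8}, so dest_bin_idx[i] is the first output slot owned
// by bin i and dest_bin_idx[i] + bin_count[i] is one past its last slot.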
__host__ __device__ unsigned int bitreverse(unsigned int number) {
number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
return number;
}
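// Note: the three mask-and-shift steps above reverse the bit order within each
// byte (bit k of a byte moves to bit 7 - k) while leaving the byte positions
// unchanged, e.g. bitreverse(0x00000001u) == 0x00000080u.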
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
__global__ void bitreverse(void *data) {
unsigned int *idata = (unsigned int*) data;
idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
}
void execute_host_functions()
{
INTERLEAVED_T host_dest_ptr[NUM_ELEMENTS];
INTERLEAVED_T host_src_ptr[NUM_ELEMENTS];
NON_INTERLEAVED_T host_dest_ptr1;
NON_INTERLEAVED_T host_src_ptr1;
float non_interleaved_duration = add_test_non_interleaved_cpu(host_dest_ptr1, host_src_ptr1, 4, NUM_ELEMENTS);
float interleaved_duration = add_test_interleaved_cpu(host_dest_ptr, host_src_ptr, 4,NUM_ELEMENTS);
printf("non_interleaved_duration: %fms\n", non_interleaved_duration);
printf("interleaved_duration: %fms\n",interleaved_duration);
}
void execute_gpu_functions()
{
INTERLEAVED_T host_dest_ptr[NUM_ELEMENTS];
INTERLEAVED_T host_src_ptr[NUM_ELEMENTS];
NON_INTERLEAVED_T host_dest_ptr1;
NON_INTERLEAVED_T host_src_ptr1;
float interleaved_duration = add_test_interleaved(host_dest_ptr,host_src_ptr, 4, NUM_ELEMENTS);
hipDeviceSynchronize();
float non_interleaved_duration = add_test_non_interleaved(host_dest_ptr1,host_src_ptr1, 4, NUM_ELEMENTS);
hipDeviceSynchronize();
printf("non_interleaved_duration: %fms\n", non_interleaved_duration);
printf("interleaved_duration: %fms\n",interleaved_duration);
hipDeviceReset();
// void *d = NULL;
// unsigned int idata[WORK_SIZE], odata[WORK_SIZE];
// int i;
// for (i = 0; i < WORK_SIZE; i++)
// idata[i] = (unsigned int) i;
// hipMalloc((void** ) &d, sizeof(int) * WORK_SIZE);
//
// hipMemcpy(d, idata, sizeof(int) * WORK_SIZE,
// hipMemcpyHostToDevice);
// bitreverse<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
// hipDeviceSynchronize(); // Wait for the GPU launched work to complete
// hipGetLastError();
//
// hipMemcpy(odata, d, sizeof(int) * WORK_SIZE,
// hipMemcpyDeviceToHost);
// for (i = 0; i < WORK_SIZE; i++)
// printf("Input value: %u, device output: %u, host output: %u\n",
// idata[i], odata[i], bitreverse(idata[i]));
// hipFree((void* ) d);
// hipDeviceReset();
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
// execute_host_functions();
execute_gpu_functions();
return 0;
}
| 010ffe289e1538791166f68eb5aeeb11a08e6614.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
static const int WORK_SIZE = 256;
#define NUM_ELEMENTS 8192
typedef struct {
unsigned int a;
unsigned int b;
unsigned int c;
unsigned int d;
} INTERLEAVED_T;
typedef INTERLEAVED_T INTERLEAVED_ARRAY_T[NUM_ELEMENTS];
typedef unsigned int ARRAY_MEMBER_T[NUM_ELEMENTS];
typedef struct {
ARRAY_MEMBER_T a;
ARRAY_MEMBER_T b;
ARRAY_MEMBER_T c;
ARRAY_MEMBER_T d;
} NON_INTERLEAVED_T;
__host__ cudaEvent_t get_time(void)
{
cudaEvent_t time;
cudaEventCreate(&time);
cudaEventRecord(time);
return time;
}
__host__ float add_test_non_interleaved_cpu(
NON_INTERLEAVED_T host_dest_ptr,
NON_INTERLEAVED_T const host_src_ptr, const unsigned int iter,
const unsigned int num_elements) {
cudaEvent_t start_time = get_time();
for (unsigned int tid = 0; tid < num_elements; tid++) {
for (unsigned int i = 0; i < iter; i++) {
host_dest_ptr.a[tid] += host_src_ptr.a[tid];
host_dest_ptr.b[tid] += host_src_ptr.b[tid];
host_dest_ptr.c[tid] += host_src_ptr.c[tid];
host_dest_ptr.d[tid] += host_src_ptr.d[tid];
}
}
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
float delta = 0;
cudaEventElapsedTime(&delta, start_time, end_time);
return delta;
}
__host__ float add_test_interleaved_cpu(INTERLEAVED_T * const host_dest_ptr,
const INTERLEAVED_T * const host_src_ptr, const unsigned int iter,
const unsigned int num_elements) {
cudaEvent_t start_time = get_time();
for (unsigned int tid = 0; tid < num_elements; tid++) {
// printf("tid: %u ", tid);
for (unsigned int i = 0; i < iter; i++) {
// printf("iteration: %un", iter);
host_dest_ptr[tid].a += host_src_ptr[tid].a;
host_dest_ptr[tid].b += host_src_ptr[tid].b;
host_dest_ptr[tid].c += host_src_ptr[tid].c;
host_dest_ptr[tid].d += host_src_ptr[tid].d;
}
}
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
float delta = 0;
cudaEventElapsedTime(&delta, start_time, end_time);
return delta;
}
__global__ void add_kernel_interleaved(INTERLEAVED_T * const dest_ptr,
const INTERLEAVED_T * const src_ptr, const unsigned int iter,
const unsigned int num_elements) {
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid < num_elements)
{
for(unsigned int i=0; i<iter; i++)
{
dest_ptr[tid].a += src_ptr[tid].a;
dest_ptr[tid].b += src_ptr[tid].b;
dest_ptr[tid].c += src_ptr[tid].c;
dest_ptr[tid].d += src_ptr[tid].d;
}
}
}
__global__ void add_kernel_non_interleaved(
NON_INTERLEAVED_T * const dest_ptr,
NON_INTERLEAVED_T * const src_ptr, const unsigned int iter,
const unsigned int num_elements) {
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid < num_elements)
{
for (unsigned int i = 0; i < iter; i++)
{
dest_ptr->a[tid] += src_ptr->a[tid];
dest_ptr->b[tid] += src_ptr->b[tid];
dest_ptr->c[tid] += src_ptr->c[tid];
dest_ptr->d[tid] += src_ptr->d[tid];
}
}
}
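// Note on the two layouts being timed: in the interleaved (array-of-structures)
// kernel, consecutive threads read src_ptr[tid].a at addresses
// sizeof(INTERLEAVED_T) = 16 bytes apart, so a warp's loads for one member are
// spread over four times as many cache lines. In the non-interleaved
// (structure-of-arrays) kernel, src_ptr->a[tid] gives consecutive 4-byte
// accesses that coalesce into full-width transactions, which is the effect the
// host-side timing comparison below is meant to expose.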
__host__ float add_test_non_interleaved(NON_INTERLEAVED_T host_dest_ptr,
const NON_INTERLEAVED_T host_src_ptr, const unsigned int iter,
const unsigned int num_elements)
{
const unsigned int num_threads = 256;
const unsigned int num_blocks = (num_elements + (num_threads-1)) / num_threads;
const size_t num_bytes = sizeof(NON_INTERLEAVED_T);
NON_INTERLEAVED_T * device_dest_ptr;
NON_INTERLEAVED_T * device_src_ptr;
cudaMalloc((void **) &device_src_ptr, num_bytes);
cudaMalloc((void **) &device_dest_ptr, num_bytes);
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start,0);
cudaEventCreate(&kernel_stop,0);
cudaStream_t test_stream;
cudaStreamCreate(&test_stream);
cudaMemcpy(device_src_ptr, &host_src_ptr, num_bytes,cudaMemcpyHostToDevice);
cudaEventRecord(kernel_start, 0);
add_kernel_non_interleaved<<<num_blocks,num_threads>>>(device_dest_ptr, device_src_ptr, iter, num_elements);
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
float delta = 0.0F;
cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
cudaFree(device_src_ptr);
cudaFree(device_dest_ptr);
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_stop);
cudaStreamDestroy(test_stream);
return delta;
}
__host__ float add_test_interleaved(INTERLEAVED_T * const host_dest_ptr,
const INTERLEAVED_T * const host_src_ptr, const unsigned int iter,
const unsigned int num_elements)
{
const unsigned int num_threads = 256;
const unsigned int num_blocks = (num_elements + (num_threads-1)) / num_threads;
const size_t num_bytes = (sizeof(INTERLEAVED_T) * num_elements);
INTERLEAVED_T * device_dest_ptr;
INTERLEAVED_T * device_src_ptr;
cudaMalloc((void **) &device_src_ptr, num_bytes);
cudaMalloc((void **) &device_dest_ptr, num_bytes);
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start,0);
cudaEventCreate(&kernel_stop,0);
cudaStream_t test_stream;
cudaStreamCreate(&test_stream);
cudaMemcpy(device_src_ptr, host_src_ptr, num_bytes,cudaMemcpyHostToDevice);
cudaEventRecord(kernel_start, 0);
add_kernel_interleaved<<<num_blocks,num_threads>>>(device_dest_ptr, device_src_ptr, iter, num_elements);
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
float delta = 0.0F;
cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
cudaFree(device_src_ptr);
cudaFree(device_dest_ptr);
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_stop);
cudaStreamDestroy(test_stream);
return delta;
}
__host__ float select_samples_cpu(unsigned int * const sample_data,
const unsigned int sample_interval,
const unsigned int num_elements,
const unsigned int * const src_data)
{
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start,0);
cudaEventCreate(&kernel_stop,0);
cudaEventRecord(kernel_start, 0);
unsigned int sample_idx = 0;
for(unsigned int src_idx=0; src_idx<num_elements;src_idx+=sample_interval)
{
sample_data[sample_idx] = src_data[src_idx];
sample_idx++;
}
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
float delta = 0.0F;
cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
return delta;
}
__global__ void select_samples_gpu_kernel(unsigned int * const sample_data,
const unsigned int sample_interval,
const unsigned int * const src_data)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
sample_data[tid] = src_data[tid*sample_interval];
}
__host__ float select_samples_gpu(unsigned int * const sample_data,
const unsigned int sample_interval,
const unsigned int num_elements,
const unsigned int num_samples,
const unsigned int * const src_data,
const unsigned int num_threads_per_block,
const char * prefix)
{
const unsigned int num_blocks = num_samples / num_threads_per_block;
assert((num_blocks * num_threads_per_block) == num_samples);
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start,0);
cudaEventCreate(&kernel_stop,0);
cudaEventRecord(kernel_start, 0);
select_samples_gpu_kernel<<<num_blocks, num_threads_per_block>>>(sample_data, sample_interval, src_data);
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
float delta = 0.0F;
cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
return delta;
}
// simple comparison function found at http://www.tutorialspoint.com/c_standard_library/c_function_qsort.htm
int compare_func (const void * a, const void * b)
{
return ( *(int*)a - *(int*)b );
}
__host__ float sort_samples_cpu(unsigned int * const sample_data,
const unsigned int num_samples)
{
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start,0);
cudaEventCreate(&kernel_stop,0);
cudaEventRecord(kernel_start, 0);
qsort(sample_data, num_samples, sizeof(unsigned int), &compare_func);
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
float delta = 0.0F;
cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
return delta;
}
__host__ __device__ unsigned int bin_search3(const unsigned int * const src_data,
const unsigned int search_value,
const unsigned int num_elements)
{
// Take the middle of the two sections
unsigned int size = (num_elements >> 1);
unsigned int start_idx = 0;
bool found = false;
do
{
const unsigned int src_idx = (start_idx+size);
const unsigned int test_value = src_data[src_idx];
if(test_value == search_value)
{
found = true;
}
else if(search_value > test_value)
{
start_idx = (start_idx+size);
}
if(found == false)
{
size >>= 1;
}
}
while((found == false) && (size != 0));
return (start_idx + size);
}
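// Small host-side illustration of the search above (not part of the original
// driver code): a value maps to the bin of the largest sample that is <= the
// value, and anything below the first sample falls into bin 0.
void bin_search3_example(void)
{
	const unsigned int samples[4] = {10u, 20u, 30u, 40u};
	printf("%u %u %u %u\n",
	       bin_search3(samples, 5u, 4u),   /* 0 */
	       bin_search3(samples, 25u, 4u),  /* 1 */
	       bin_search3(samples, 30u, 4u),  /* 2 */
	       bin_search3(samples, 45u, 4u)); /* 3 */
}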
__host__ float count_bins_cpu(const unsigned int num_samples,
const unsigned int num_elements,
const unsigned int * const src_data,
const unsigned int * const sample_data,
unsigned int * const bin_count)
{
cudaEvent_t start_time = get_time();
for(unsigned int src_idx = 0; src_idx<num_elements;src_idx++)
{
const unsigned int data_to_find = src_data[src_idx];
const unsigned int idx = bin_search3(sample_data,data_to_find,num_samples);
bin_count[idx]++;
}
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
float delta = 0;
cudaEventElapsedTime(&delta, start_time, end_time);
return delta;
}
//Single data point atomic add to gmem
__global__ void count_bins_gpu_kernel5(const unsigned int num_samples,
const unsigned int num_elements,
const unsigned int * const src_data,
const unsigned int * const sample_data,
unsigned int * const bin_count)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int data_to_find = src_data[tid];
const unsigned int idx = bin_search3(sample_data, data_to_find, num_samples);
atomicAdd(&bin_count[idx],1);
}
__host__ float count_bins_gpu(const unsigned int num_samples,
const unsigned int * const src_data,
const unsigned int * const sample_data,
unsigned int * const bin_count,
const unsigned int num_threads,
const char * prefix)
{
const unsigned int num_blocks = num_samples / num_threads;
cudaEvent_t start_time = get_time();
count_bins_gpu_kernel5<<<num_blocks,num_threads>>>(num_samples, NUM_ELEMENTS, src_data, sample_data, bin_count);
// cuda_error_check(prefix, "Error invoking count_bins_gpu_kernel");
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
float delta = 0;
cudaEventElapsedTime(&delta, start_time, end_time);
return delta;
}
__host__ float calc_bin_idx_cpu(const unsigned int num_samples,
const unsigned int * const bin_count,
unsigned int * const dest_bin_idx)
{
cudaEvent_t start_time = get_time();
unsigned int prefix_sum = 0;
for(unsigned int i = 0; i<num_samples;i++)
{
dest_bin_idx[i] = prefix_sum;
prefix_sum += bin_count[i];
}
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
float delta = 0;
cudaEventElapsedTime(&delta, start_time, end_time);
return delta;
}
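// Example of the exclusive prefix sum computed above: bin_count = {3, 1, 0, 2}
// produces dest_bin_idx = {0, 3, 4, 4}; dest_bin_idx[i] is the offset at which
// bin i would start if the elements were later scattered out bin by bin.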
__host__ __device__ unsigned int bitreverse(unsigned int number) {
number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
return number;
}
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
__global__ void bitreverse(void *data) {
unsigned int *idata = (unsigned int*) data;
idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
}
void execute_host_functions()
{
INTERLEAVED_T host_dest_ptr[NUM_ELEMENTS];
INTERLEAVED_T host_src_ptr[NUM_ELEMENTS];
NON_INTERLEAVED_T host_dest_ptr1;
NON_INTERLEAVED_T host_src_ptr1;
float non_interleaved_duration = add_test_non_interleaved_cpu(host_dest_ptr1, host_src_ptr1, 4, NUM_ELEMENTS);
float interleaved_duration = add_test_interleaved_cpu(host_dest_ptr, host_src_ptr, 4,NUM_ELEMENTS);
printf("non_interleaved_duration: %fms\n", non_interleaved_duration);
printf("interleaved_duration: %fms\n",interleaved_duration);
}
void execute_gpu_functions()
{
INTERLEAVED_T host_dest_ptr[NUM_ELEMENTS];
INTERLEAVED_T host_src_ptr[NUM_ELEMENTS];
NON_INTERLEAVED_T host_dest_ptr1;
NON_INTERLEAVED_T host_src_ptr1;
float interleaved_duration = add_test_interleaved(host_dest_ptr,host_src_ptr, 4, NUM_ELEMENTS);
cudaThreadSynchronize();
float non_interleaved_duration = add_test_non_interleaved(host_dest_ptr1,host_src_ptr1, 4, NUM_ELEMENTS);
cudaThreadSynchronize();
printf("non_interleaved_duration: %fms\n", non_interleaved_duration);
printf("interleaved_duration: %fms\n",interleaved_duration);
cudaDeviceReset();
// void *d = NULL;
// unsigned int idata[WORK_SIZE], odata[WORK_SIZE];
// int i;
// for (i = 0; i < WORK_SIZE; i++)
// idata[i] = (unsigned int) i;
// cudaMalloc((void** ) &d, sizeof(int) * WORK_SIZE);
//
// cudaMemcpy(d, idata, sizeof(int) * WORK_SIZE,
// cudaMemcpyHostToDevice);
// bitreverse<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
// cudaThreadSynchronize(); // Wait for the GPU launched work to complete
// cudaGetLastError();
//
// cudaMemcpy(odata, d, sizeof(int) * WORK_SIZE,
// cudaMemcpyDeviceToHost);
// for (i = 0; i < WORK_SIZE; i++)
// printf("Input value: %u, device output: %u, host output: %u\n",
// idata[i], odata[i], bitreverse(idata[i]));
// cudaFree((void* ) d);
// cudaDeviceReset();
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
// execute_host_functions();
execute_gpu_functions();
return 0;
}
|
7f7f595bcd6b11162a2ce0aa86798984ff2f3525.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gpu.h"
float *g_Vx0;
float *g_Vz0;
float *g_sigmaxx0;
float *g_sigmazz0;
float *g_sigmaxz0;
float *g_m1_x;
float *g_m1_z;
float *g_aux_m2_c;
float *g_aux_m3_c;
float *g_aux_m2m3_c;
//void setup_cuda(int ngpus, int argc, char **argv){
//insert from Bob's Born
// ;
//}
//void process_error( const hipError_t &error, char *string=0, bool verbose=false ){
//insert from Bob's Born
// ;
//}
extern "C" void rtm_gpu_init(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
//set cuda devices and put all data onto gpu memory
hipError_t cuda_ret;
hipError_t err;
//Set Device
fprintf(stderr,"GPU init. \n");
cuda_ret = hipSetDevice(0);
if(cuda_ret != hipSuccess){
fprintf(stderr, "Failed to Set The cuda Device !\n");
}
else{
fprintf(stderr, "GPU Device Set OK\n");
}
// data init
hipMalloc(&g_Vx0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_Vz0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_sigmaxx0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_sigmazz0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_sigmaxz0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_m1_x,sizeof(float)*nx*nz);
hipMalloc(&g_m1_z,sizeof(float)*nx*nz);
hipMalloc(&g_aux_m2_c,sizeof(float)*nx*nz);
hipMalloc(&g_aux_m3_c,sizeof(float)*nx*nz);
hipMalloc(&g_aux_m2m3_c,sizeof(float)*nx*nz);
fprintf(stderr,"GPU Data Init OK\n");
// data copy
// hipMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// fprintf(stderr,"Data Copy To GPU OK\n");
}
extern "C" void rtm_gpu_copy_in(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy
hipMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
fprintf(stderr,"Data Copy To GPU OK\n");
}
extern "C" void rtm_gpu_copy_out(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
hipMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy( Vz0, g_Vz0,sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
hipMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
hipMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
hipMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
hipMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
fprintf(stderr,"Data Copy To CPU OK\n");
}
extern "C" void rtm_gpu_final(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
// hipMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy( Vz0, g_Vz0,sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// fprintf(stderr,"Data Copy To CPU OK\n");
	hipFree(g_Vx0);
	hipFree(g_Vz0);
	hipFree(g_sigmaxx0);
	hipFree(g_sigmazz0);
	hipFree(g_sigmaxz0);
	hipFree(g_m1_x);
	hipFree(g_m1_z);
	hipFree(g_aux_m2_c);
	hipFree(g_aux_m3_c);
	hipFree(g_aux_m2m3_c);
fprintf(stderr,"GPU Mem Released OK\n");
}
__global__ void rtm_gpu_kernel(int it,int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0) //(nz, nx, nt)
//float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
//GPU thread index
int gz, gx, gt;
gz = blockIdx.x*blockDim.x + threadIdx.x;
gx = blockIdx.y*blockDim.y + threadIdx.y;
gt = it;
// gt = blockIdx.z*blockDim.y + threadIdx.z;
Vx0[index3d(gz, gx, gt)] = Vx0[index3d(gz, gx, gt)] + Vx0[index3d(gz, gx, gt+2)];
Vz0[index3d(gz, gx, gt)] = Vz0[index3d(gz, gx, gt)] + Vz0[index3d(gz, gx, gt+2)];
sigmaxx0[index3d(gz, gx, gt)] = sigmaxx0[index3d(gz, gx, gt)] + sigmaxx0[index3d(gz, gx, gt+2)];
sigmazz0[index3d(gz, gx, gt)] = sigmazz0[index3d(gz, gx, gt)] + sigmazz0[index3d(gz, gx, gt+2)];
sigmaxz0[index3d(gz, gx, gt)] = sigmaxz0[index3d(gz, gx, gt)] + sigmaxz0[index3d(gz, gx, gt+2)];
}
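// Each thread above updates one (gz, gx) point of time slice `it`, accumulating
// the slice two steps ahead, so the caller is expected to keep it < nt-2.
// The launch configuration in rtm_gpu_func below uses dimGrid(nz/TZ, nx/TX)
// with integer division (TZ and TX come from gpu.h), so nz and nx are assumed
// to be exact multiples of TZ and TX; otherwise trailing rows/columns are skipped.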
extern "C" void rtm_gpu_func(int it, int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
hipError_t err;
hipEvent_t start, stop;
float elapsedTime = 0.0f;
//time record
dim3 dimGrid(nz/TZ, nx/TX);
dim3 dimBlock(TZ, TX);
//RTM kernel
fprintf(stderr,"GPU Computing...(NZ=%d, NX=%d, TZ=%d, TX=%d)\n", nz, nx, TZ, TX);
//hipEventRecord(start, 0);
hipLaunchKernelGGL(( rtm_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, it,nt, nz, nx, g_Vx0, g_Vz0, g_sigmaxx0, g_sigmazz0, g_sigmaxz0);
hipDeviceSynchronize();
err = hipGetLastError();
if(hipSuccess != err)
fprintf(stderr, "Cuda error: %s.\n", hipGetErrorString(err));
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&elapsedTime, start, stop);
//fprintf(stderr,"GPU Computational Elapsed Time: %.4f\n",elapsedTime);
}
| 7f7f595bcd6b11162a2ce0aa86798984ff2f3525.cu | #include <stdio.h>
#include "gpu.h"
float *g_Vx0;
float *g_Vz0;
float *g_sigmaxx0;
float *g_sigmazz0;
float *g_sigmaxz0;
float *g_m1_x;
float *g_m1_z;
float *g_aux_m2_c;
float *g_aux_m3_c;
float *g_aux_m2m3_c;
//void setup_cuda(int ngpus, int argc, char **argv){
//insert from Bob's Born
// ;
//}
//void process_error( const cudaError_t &error, char *string=0, bool verbose=false ){
//insert from Bob's Born
// ;
//}
extern "C" void rtm_gpu_init(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
//set cuda devices and put all data onto gpu memory
cudaError_t cuda_ret;
cudaError_t err;
//Set Device
fprintf(stderr,"GPU init. \n");
cuda_ret = cudaSetDevice(0);
if(cuda_ret != cudaSuccess){
fprintf(stderr, "Failed to Set The cuda Device !\n");
}
else{
fprintf(stderr, "GPU Device Set OK\n");
}
// data init
cudaMalloc(&g_Vx0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_Vz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmaxx0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmazz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmaxz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_m1_x,sizeof(float)*nx*nz);
cudaMalloc(&g_m1_z,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m2_c,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m3_c,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m2m3_c,sizeof(float)*nx*nz);
fprintf(stderr,"GPU Data Init OK\n");
// data copy
// cudaMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// fprintf(stderr,"Data Copy To GPU OK\n");
}
extern "C" void rtm_gpu_copy_in(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy
cudaMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
fprintf(stderr,"Data Copy To GPU OK\n");
}
extern "C" void rtm_gpu_copy_out(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
cudaMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy( Vz0, g_Vz0,sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
fprintf(stderr,"Data Copy To CPU OK\n");
}
extern "C" void rtm_gpu_final(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
// cudaMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy( Vz0, g_Vz0,sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// fprintf(stderr,"Data Copy To CPU OK\n");
	cudaFree(g_Vx0);
	cudaFree(g_Vz0);
	cudaFree(g_sigmaxx0);
	cudaFree(g_sigmazz0);
	cudaFree(g_sigmaxz0);
	cudaFree(g_m1_x);
	cudaFree(g_m1_z);
	cudaFree(g_aux_m2_c);
	cudaFree(g_aux_m3_c);
	cudaFree(g_aux_m2m3_c);
fprintf(stderr,"GPU Mem Released OK\n");
}
__global__ void rtm_gpu_kernel(int it,int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0) //(nz, nx, nt)
//float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
//GPU thread index
int gz, gx, gt;
gz = blockIdx.x*blockDim.x + threadIdx.x;
gx = blockIdx.y*blockDim.y + threadIdx.y;
gt = it;
// gt = blockIdx.z*blockDim.y + threadIdx.z;
Vx0[index3d(gz, gx, gt)] = Vx0[index3d(gz, gx, gt)] + Vx0[index3d(gz, gx, gt+2)];
Vz0[index3d(gz, gx, gt)] = Vz0[index3d(gz, gx, gt)] + Vz0[index3d(gz, gx, gt+2)];
sigmaxx0[index3d(gz, gx, gt)] = sigmaxx0[index3d(gz, gx, gt)] + sigmaxx0[index3d(gz, gx, gt+2)];
sigmazz0[index3d(gz, gx, gt)] = sigmazz0[index3d(gz, gx, gt)] + sigmazz0[index3d(gz, gx, gt+2)];
sigmaxz0[index3d(gz, gx, gt)] = sigmaxz0[index3d(gz, gx, gt)] + sigmaxz0[index3d(gz, gx, gt+2)];
}
extern "C" void rtm_gpu_func(int it, int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
cudaError_t err;
cudaEvent_t start, stop;
float elapsedTime = 0.0f;
//time record
dim3 dimGrid(nz/TZ, nx/TX);
dim3 dimBlock(TZ, TX);
//RTM kernel
fprintf(stderr,"GPU Computing...(NZ=%d, NX=%d, TZ=%d, TX=%d)\n", nz, nx, TZ, TX);
//cudaEventRecord(start, 0);
rtm_gpu_kernel<<<dimGrid, dimBlock>>>(it,nt, nz, nx, g_Vx0, g_Vz0, g_sigmaxx0, g_sigmazz0, g_sigmaxz0);
cudaThreadSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err)
fprintf(stderr, "Cuda error: %s.\n", cudaGetErrorString(err));
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&elapsedTime, start, stop);
//fprintf(stderr,"GPU Computational Elapsed Time: %.4f\n",elapsedTime);
}
|
4fae955887b2564a01c97e72193bc67623ba5856.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
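// Worked example of the bin arithmetic above, with illustrative numbers: for
// spatial_scale = 1 and a ROI row {0, 0, 0, 5, 5}, roi_width = roi_height = 6.
// With a 3x3 pooled output, bin_size_h = bin_size_w = 2, so bin (ph, pw) = (1, 2)
// covers h in [2, 4) and w in [4, 6) after clipping to the feature map; the max
// over that window goes to top_data and its flat index (h * width + w) to
// argmax_data, which the backward kernel uses to route gradients.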
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe | 4fae955887b2564a01c97e72193bc67623ba5856.cu | #include <cfloat>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe |
8e9b27708a831ab15a41de3bc57124b10e3be190.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) by Contributors 2020
*/
#include <gtest/gtest.h>
#include <cmath>
#include "xgboost/metric.h"
#include "../helpers.h"
#include "../../../src/common/survival_util.h"
/** Tests for Survival metrics that should run both on CPU and GPU **/
namespace xgboost {
namespace common {
namespace {
inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) {
auto lparam = CreateEmptyGenericParam(device);
std::unique_ptr<Metric> metric{Metric::Create(name.c_str(), &lparam)};
metric->Configure(Args{});
HostDeviceVector<float> predts;
MetaInfo info;
auto &h_predts = predts.HostVector();
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist{0.0f, 1.0f};
size_t n_samples = 2048;
h_predts.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_predts[i] = dist(&lcg);
}
auto &h_upper = info.labels_upper_bound_.HostVector();
auto &h_lower = info.labels_lower_bound_.HostVector();
h_lower.resize(n_samples);
h_upper.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_lower[i] = 1;
h_upper[i] = 10;
}
auto result = metric->Eval(predts, info);
for (size_t i = 0; i < 8; ++i) {
ASSERT_EQ(metric->Eval(predts, info), result);
}
}
} // anonymous namespace
TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
/**
* Test aggregate output from the AFT metric over a small test data set.
* This is unlike AFTLoss.* tests, which verify metric values over individual data points.
**/
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector()
= { 100.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector()
= { 100.0f, 20.0f, std::numeric_limits<bst_float>::infinity(), 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, ::log(64));
struct TestCase {
std::string dist_type;
bst_float reference_value;
};
for (const auto& test_case : std::vector<TestCase>{ {"normal", 2.1508f}, {"logistic", 2.1804f},
{"extreme", 2.0706f} }) {
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
{"aft_loss_distribution_scale", "1.0"} });
EXPECT_NEAR(metric->Eval(preds, info), test_case.reference_value, 1e-4);
}
}
TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector() = { 20.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector() = { 80.0f, 20.0f, 80.0f, 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, ::log(60.0f));
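  // Predictions are on the log scale, so every row predicts exp(log 60) = 60.
  // A row counts as accurate when that value falls inside [lower, upper]:
  // 60 lies in [20,80], [60,80] and [16,200] but not [0,20], hence 3/4 = 0.75.
  // The assertions below then tighten or loosen individual bounds and check
  // that the ratio tracks accordingly.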
std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.75f);
info.labels_lower_bound_.HostVector()[2] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
info.labels_lower_bound_.HostVector()[0] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.25f);
CheckDeterministicMetricElementWise(StringView{"interval-regression-accuracy"}, GPUIDX);
}
// Test configuration of AFT metric
TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}});
// Configuration round-trip test
Json j_obj{ Object() };
metric->SaveConfig(&j_obj);
auto aft_param_json = j_obj["aft_loss_param"];
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution"]), "normal");
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution_scale"]), "10");
CheckDeterministicMetricElementWise(StringView{"aft-nloglik"}, GPUIDX);
}
} // namespace common
} // namespace xgboost
| 8e9b27708a831ab15a41de3bc57124b10e3be190.cu | /*!
* Copyright (c) by Contributors 2020
*/
#include <gtest/gtest.h>
#include <cmath>
#include "xgboost/metric.h"
#include "../helpers.h"
#include "../../../src/common/survival_util.h"
/** Tests for Survival metrics that should run both on CPU and GPU **/
namespace xgboost {
namespace common {
namespace {
inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) {
auto lparam = CreateEmptyGenericParam(device);
std::unique_ptr<Metric> metric{Metric::Create(name.c_str(), &lparam)};
metric->Configure(Args{});
HostDeviceVector<float> predts;
MetaInfo info;
auto &h_predts = predts.HostVector();
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist{0.0f, 1.0f};
size_t n_samples = 2048;
h_predts.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_predts[i] = dist(&lcg);
}
auto &h_upper = info.labels_upper_bound_.HostVector();
auto &h_lower = info.labels_lower_bound_.HostVector();
h_lower.resize(n_samples);
h_upper.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_lower[i] = 1;
h_upper[i] = 10;
}
auto result = metric->Eval(predts, info);
for (size_t i = 0; i < 8; ++i) {
ASSERT_EQ(metric->Eval(predts, info), result);
}
}
} // anonymous namespace
TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
/**
* Test aggregate output from the AFT metric over a small test data set.
* This is unlike AFTLoss.* tests, which verify metric values over individual data points.
**/
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector()
= { 100.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector()
= { 100.0f, 20.0f, std::numeric_limits<bst_float>::infinity(), 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, std::log(64));
struct TestCase {
std::string dist_type;
bst_float reference_value;
};
for (const auto& test_case : std::vector<TestCase>{ {"normal", 2.1508f}, {"logistic", 2.1804f},
{"extreme", 2.0706f} }) {
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
{"aft_loss_distribution_scale", "1.0"} });
EXPECT_NEAR(metric->Eval(preds, info), test_case.reference_value, 1e-4);
}
}
TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector() = { 20.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector() = { 80.0f, 20.0f, 80.0f, 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, std::log(60.0f));
std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.75f);
info.labels_lower_bound_.HostVector()[2] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
info.labels_lower_bound_.HostVector()[0] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.25f);
CheckDeterministicMetricElementWise(StringView{"interval-regression-accuracy"}, GPUIDX);
}
// Test configuration of AFT metric
TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}});
// Configuration round-trip test
Json j_obj{ Object() };
metric->SaveConfig(&j_obj);
auto aft_param_json = j_obj["aft_loss_param"];
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution"]), "normal");
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution_scale"]), "10");
CheckDeterministicMetricElementWise(StringView{"aft-nloglik"}, GPUIDX);
}
} // namespace common
} // namespace xgboost
|
11843754d6d67f5f2a6cdcc6dd04cad7324f67b1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "score/scores.h"
#include <gtest/gtest.h>
#include "random/rng.h"
#include "test_utils.h"
#include <iostream>
namespace MLCommon {
namespace Score {
class ScoreTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
};
typedef ScoreTest ScoreTestHighScore;
TEST(ScoreTestHighScore, Result) {
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.12, 0.22, 0.32, 0.42, 0.52};
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
float *d_y;
MLCommon::allocate(d_y, 5);
float *d_y_hat;
MLCommon::allocate(d_y_hat, 5);
MLCommon::updateDevice(d_y_hat, y_hat, 5, stream);
MLCommon::updateDevice(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
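    // Sanity check of the expected value: R^2 = 1 - SS_res / SS_tot. Every
    // prediction here is off by exactly +0.02, so SS_res = 5 * 0.02^2 = 0.002,
    // while SS_tot = sum((y_i - 0.3)^2) = 0.10, giving 1 - 0.02 = 0.98.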
ASSERT_TRUE(result == 0.98f);
CUDA_CHECK(hipStreamDestroy(stream));
}
typedef ScoreTest ScoreTestLowScore;
TEST(ScoreTestLowScore, Result) {
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.012, 0.022, 0.032, 0.042, 0.052};
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
float *d_y;
MLCommon::allocate(d_y, 5);
float *d_y_hat;
MLCommon::allocate(d_y_hat, 5);
MLCommon::updateDevice(d_y_hat, y_hat, 5, stream);
MLCommon::updateDevice(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
std::cout << "Result: " << result - -3.4012f << std::endl;
ASSERT_TRUE(result - -3.4012f < 0.00001);
CUDA_CHECK(hipStreamDestroy(stream));
}
}}
| 11843754d6d67f5f2a6cdcc6dd04cad7324f67b1.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "score/scores.h"
#include <gtest/gtest.h>
#include "random/rng.h"
#include "test_utils.h"
#include <iostream>
namespace MLCommon {
namespace Score {
class ScoreTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
};
typedef ScoreTest ScoreTestHighScore;
TEST(ScoreTestHighScore, Result) {
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.12, 0.22, 0.32, 0.42, 0.52};
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
float *d_y;
MLCommon::allocate(d_y, 5);
float *d_y_hat;
MLCommon::allocate(d_y_hat, 5);
MLCommon::updateDevice(d_y_hat, y_hat, 5, stream);
MLCommon::updateDevice(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
ASSERT_TRUE(result == 0.98f);
CUDA_CHECK(cudaStreamDestroy(stream));
}
typedef ScoreTest ScoreTestLowScore;
TEST(ScoreTestLowScore, Result) {
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.012, 0.022, 0.032, 0.042, 0.052};
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
float *d_y;
MLCommon::allocate(d_y, 5);
float *d_y_hat;
MLCommon::allocate(d_y_hat, 5);
MLCommon::updateDevice(d_y_hat, y_hat, 5, stream);
MLCommon::updateDevice(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
std::cout << "Result: " << result - -3.4012f << std::endl;
ASSERT_TRUE(result - -3.4012f < 0.00001);
CUDA_CHECK(cudaStreamDestroy(stream));
}
}}
|
410a00e3de36087907222d9a028b0fa82d0b403d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
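// Note (inferred from the launcher below): each thread handles one (output_y, output_x)
// position of a single H*W plane -- num_elements excludes batch and channel, which are
// looped over inside the kernel. Each output pixel gathers a 4x4 input neighbourhood:
// four horizontal 1-D cubic interpolations (t_x) followed by one vertical pass (t_y).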
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// special case: input and output are the same size, just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
static void upsample_bicubic2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bicubic2d_out", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
output.zero_();
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_output_elements = output_height * output_width;
const int max_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_output_elements, max_threads)),
dim3(max_threads),
0,
stream,
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_bicubic2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bicubic2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_bicubic2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor& upsample_bicubic2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda");
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_cuda");
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_bicubic2d_out_cuda_template(output, input, osize, align_corners, scale_h, scale_w);
return output;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_cuda");
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, align_corners, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
| 410a00e3de36087907222d9a028b0fa82d0b403d.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// special case: input and output are the same size, just copy

if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
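// Note: the backward kernel above scatters each grad_output value into a 4x4
// neighbourhood of grad_input through upsample_increment_value_bounded, which --
// per the "Nondeterministic because of atomicAdd usage" notes in the entry points
// further down -- accumulates with atomicAdd, so bitwise results are not deterministic.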
static void upsample_bicubic2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bicubic2d_out", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
output.zero_();
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_output_elements = output_height * output_width;
const int max_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_output_elements, max_threads),
max_threads,
0,
stream>>>(
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_bicubic2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bicubic2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_bicubic2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor& upsample_bicubic2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda");
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_cuda");
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_bicubic2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_bicubic2d_out_cuda_template(output, input, osize, align_corners, scale_h, scale_w);
return output;
}
Tensor upsample_bicubic2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_cuda");
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, align_corners, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
|
57a182057645263e22049e58caf1166ab160af5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// * -PSM2D-
// * P and SV WAVES
// ************************************************************************
// * Calculating P and SV wavefields in homogeneous half-space for a *
// * point source by the Finite-Difference Method. *
// * **********************************************************************
// * Last modified: May 14, 2017 *
// * Author: Yanbin WANG *
// * Department of Earth and Planetary Sciences *
// * Faculty of Sciences, Kyushu University *
// * Hakozaki 6-10-1, Fukuoka, 812-8581, Japan *
// * Now at: Department of Geophysics, Peking University *
// * 100871, Beijing, China *
// * Modified to staggered-grid scheme on 16 June 2005. *
// * Modified to PSM/FDM hybrid method in February 2006 *
// * by Xing Wei and Yanbin Wang. *
// * Modified for Lanzhou basin on 11 January 2011. *
// * by Yanbin Wang. *
// * Modified to Finite-Difference Method on March, 2016 *
// * by Xueyan Li and Yanbin Wang. *
// * Modified to Cuda C on March, 2017 *
// * by Congyue Cui and Yanbin Wang. *
// ************************************************************************
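// Summary (inferred from the kernels below, not part of the original header):
// the solver integrates the 2-D P-SV velocity-stress system on a staggered grid,
// d(sxx)/dt = (lam + 2*rig)*dvx/dx + lam*dvy/dy (and analogously syy, sxy),
// d(vx)/dt = (dsxx/dx + dsxy/dy + fx)/den (and analogously vy),
// using 4th-order spatial differences (c0 = 9/8, c1 = 1/24), an exponential
// absorbing taper ggg along the side and bottom edges, and a Herrmann-shaped
// source in space and time.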
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// map block and thread index to i and j
#define devij(dimx, dimy) \
int i = blockIdx.x % dimx; \
int j = threadIdx.x + (blockIdx.x - i) / dimx * dimy / d_nbt; \
int ij = i * dimy + j
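// Note: kernels are launched as <<<dimx * nbt, dimy / nbt>>>, so
// blockIdx.x = chunk * dimx + i with 0 <= chunk < nbt, and devij recovers
// 0 <= i < dimx, 0 <= j < dimy -- one thread per grid point. d_nbt must
// match the host-side nbt (both are 8 here).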
// parameter
const int nx = 2048, ny = 1024, nx2 = nx * 2, ny2 = ny * 2;
const float dx = 0.0342, dy = 0.0342, dt = 1.0e-3;
const int ntmax = 30000, nwrite = 500;
const float at = 0.1 / 4.0, t0 = at * 2;
const int na = 0;
const int nst = 512, nsskip = nx / nst;
const int nxa = 20, nya = 20;
const int nskip = 10, ntskp = ntmax / nskip + 1;
const int nbt = 8;
// plotting parameter
const int nsnap=60;
const float pamp=0.5,samp=2.2;
const float pampall=0.5,sampall=2.2;
// device parameter
__constant__ int is0 = 292, js0 = 146;
__constant__ float ax = 0.0342, ay = 0.0342;
__constant__ float fxx = 0.0, fyy = 0.0, fzz = 0.0;
__constant__ float dpxx = 0.0, dpyy = 0.0, dpzz = 0.0;
__constant__ float rmxx = 1.0, rmxy = 0.0, rmyy = -1.0, rmyx=0.0;
__constant__ float c0 = 9.0 / 8.0, c1 = 1.0 / 24.0;
__constant__ int d_nbt = 8;
// matrix related function
namespace mat{
float *create(const int m) {
// create floating-point device array
float *a;
hipMalloc((void**)&a, m * sizeof(float));
return a;
}
float *create_h(const int m) {
// create floating-point host array
return (float *)malloc(m * sizeof(float));
}
int *create_i(const int m){
// create integer device array
int *a;
hipMalloc((void**)&a, m * sizeof(int));
return a;
}
__global__ void set_d(float *a, const float init, const int m, const int n){
devij(m, n);
a[ij] = init;
}
void set(float *a, const float init, const int m, const int n){
// initialize the value of a device matrix
hipLaunchKernelGGL(( mat::set_d), dim3(m * nbt), dim3(n / nbt), 0, 0, a, init, m, n);
}
void copyhd(float *d_a, const float *a, const int m){
// copy memory from host(a) to device(d_a)
hipMemcpy(d_a, a , m * sizeof(float), hipMemcpyHostToDevice);
}
void copydh(float *a, const float *d_a, const int m){
// copy memory from device(d_a) to host(a)
hipMemcpy(a, d_a , m * sizeof(float), hipMemcpyDeviceToHost);
}
void write(FILE *file, float *d_a, float *a, const int nx, const int ny){
// write matrix data to file
mat::copydh(a, d_a, nx * ny);
for(int i= 0; i < nx; i++){
for(int j = 0; j < ny; j++){
fprintf(file,"%f\n", a[i * ny + j]);
}
}
}
void read(FILE *file, float *a, const int nx, const int ny){
// read matrix data from file
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
fscanf(file, "%f", a + ij);
}
}
}
}
// forward related function
namespace psv{
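// Note on the source helpers below: herrman(a, x, x0) is a piecewise-quadratic
// bell centred at x0 with support |x - x0| <= 2a and unit area, and dherrman is
// its derivative; the fxm*/fzm*/fx/fz/exforce/ezforce helpers build the force and
// moment-tensor source terms as separable products fh(x) * fh(z) * fh(t).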
__device__ float dherrman(float a, float x, float x0){
float a2 = 2.0 * a;
float t = x - x0;
float td = (t + a2) / a;
if(t <= -a2) return 0.0;
if(t <= -a) return td / (a2 * a);
if(t <= a) return (-td + 2.0) / (a2 * a);
if(t <= a2) return (td - 4.0) / (a2 * a);
return 0.0;
}
__device__ float herrman(float a, float x, float x0){
float a2 = 2.0*a;
float t = x - x0;
float td = (t + a2)/a;
if(t <= -a2) return 0.0;
if(t <= -a) return (0.5 * td * td) / a2;
if(t <= a) return (-0.5 * td * td + 2.0 * td - 1.0) / a2;
if(t <= a2) return (0.5 * td * td - 4.0 * td + 8.0) / a2;
return 0.0;
}
__device__ float fxmxz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fzmxz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fzmzz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fxmxx(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fx(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float exforce(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float ezforce(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__global__ void istxy(int *istx, int *isty, const int na){
int i = blockIdx.x;
istx[i] = i * 4 + 1;
isty[i] = na + 1;
}
__global__ void rdl(float *rig, float *den, float *lam,
const float *nd, const float *q1d,
const int nx2, const int ny2, const float dy){
devij(nx2, ny2);
float depth = j * dy / 2.0;
float vpb, vsb, rob;
float rrigb, rlanb, rdenb;
if(depth <= -q1d[i] / 1000.0){
vpb = 1.70;
vsb = 0.85;
rob = 1.8;
}
else if(depth <= -nd[i] / 1000.0){
vpb = 4.0;
vsb = 2.1;
rob = 2.4;
}
else if(depth <= 15.0){
vpb = 5.8;
vsb = 3.3;
rob = 2.7;
}
else if(depth <= 32.0){
vpb = 6.4;
vsb = 3.6;
rob = 2.85;
}
else{
vpb = 6.9;
vsb = 3.9;
rob = 3.1;
}
rrigb = rob * vsb * vsb;
rlanb = rob * vpb * vpb - 2.0 * rrigb;
rdenb = rob;
if(j < na * 2){
rig[ij] = 0.0;
den[ij] = rdenb;
lam[ij] = 0.0;
}
else{
rig[ij] = rrigb;
den[ij] = rdenb;
lam[ij] = rlanb;
}
}
__global__ void gg(float *ggg, const float apara,
const int nx, const int ny, const int nxa, const int nya){
devij(nx, ny);
if(i + 1 < nxa){
ggg[ij]=exp(-pow(apara * (nxa - i - 1), 2));
}
else if(i + 1 > (nx - nxa + 1)){
ggg[ij]=exp(-pow(apara * (i - nx + nxa), 2));
}
else if(j + 1 > (ny - nya + 1)){
ggg[ij]=exp(-pow(apara * (j - ny + nya), 2));
}
else{
ggg[ij]=1.0;
}
}
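// Note: the four finid* kernels below are 4th-order staggered-grid differences.
// Away from the edges finidyy returns (c0*(a[j] - a[j-1]) - c1*(a[j+1] - a[j-2]))/dy
// and finidyx the half-cell-shifted forward form, with c0 = 9/8 and c1 = 1/24;
// the boundary branches assume zero field values outside the grid. finidxx and
// finidxy do the same along the i index.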
__global__ void finidyy(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *ai = a + i * ny;
if(j == 0){
dya[ij] = 1.0 / dy * (c0 * ai[0] - c1 * ai[1]);
}
else if(j == 1){
dya[ij] = 1.0 / dy * (c0 * (ai[1] - ai[0]) - c1 * ai[2]);
}
else if(j == ny - 1){
dya[ij] = 1.0 / dy * (c0 * (ai[ny - 1] - ai[ny - 2]) + c1 * ai[ny - 3]);
}
else{
dya[ij] = 1.0 / dy * (c0 * (ai[j] - ai[j - 1]) - c1 * (ai[j + 1] - ai[j - 2]));
}
}
__global__ void finidyx(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *ai = a + i * ny;
if(j == 0){
dya[ij] = 1.0 / dy * (c0 * (ai[1] - ai[0]) - c1 * ai[2]);
}
else if(j == ny - 2){
dya[ij] = 1.0 / dy * (c0 * (ai[ny - 1] - ai[ny - 2]) + c1 * ai[ny - 3]);
}
else if(j == ny - 1){
dya[ij] = 1.0 / dy * (c0 * (-ai[ny - 1]) + c1 * ai[ny - 2]);
}
else{
dya[ij] = 1.0 / dy * (c0 * (ai[j + 1] - ai[j]) - c1 * (ai[j + 2] - ai[j - 1]));
}
}
__global__ void finidxy(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *aj = a + j;
if(i == 0){
dya[ij] = 1.0 / dx * (c0 * aj[0] - c1 * aj[ny]);
}
else if(i == 1){
dya[ij] = 1.0 / dx * (c0 * (aj[ny] - aj[0]) - c1 * aj[2 * ny]);
}
else if(i == nx - 1){
dya[ij] = 1.0 / dx * (c0 * (aj[(nx - 1) * ny] - aj[(nx - 2) * ny]) + c1 * aj[(nx - 3) * ny]);
}
else{
dya[ij] = 1.0 / dx * (c0 * (aj[i * ny] - aj[(i - 1) * ny]) - c1 * (aj[(i + 1) * ny] - aj[(i - 2) * ny]));
}
}
__global__ void finidxx(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *aj = a + j;
if(i == 0){
dya[ij] = 1.0 / dx * (c0 * (aj[ny] - aj[0]) - c1 * aj[2 * ny]);
}
else if(i == nx - 2){
dya[ij] = 1.0 / dx * (c0 * (aj[(nx - 1) * ny] - aj[(nx - 2) * ny]) + c1 * aj[(nx - 3) * ny]);
}
else if(i == nx - 1){
dya[ij] = 1.0 / dx * (c0 * (-aj[(nx - 1) * ny ]) + c1 * aj[(nx - 2) * ny]);
}
else{
dya[ij] = 1.0 / dx * (c0 * (aj[(i + 1) * ny] - aj[i * ny]) - c1 * (aj[(i + 2) * ny] - aj[(i - 1) * ny]));
}
}
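// Note: the sxy kernel below is the constitutive step,
// sxx += dt*((lam + 2*rig)*dxvx + lam*dyvy), syy += dt*((lam + 2*rig)*dyvy + lam*dxvx),
// sxy += dt*rig*(dxvy + dyvx), damped by the absorbing factor gg, with the
// free-surface condition syy = 0 enforced on row j == na.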
__global__ void sxy(float *sxx, float *syy, float *sxy,
const float *lam, const float *rig, const float *ggg,
const float *dxvx, const float *dxvy, const float *dyvx, const float *dyvy,
const int nx, const int ny, const float dt){
devij(nx, ny);
float ram1 = lam[(i * 2 + 1) * ny + j * 2];
float rig1 = rig[(i * 2 + 1) * ny + j * 2];
float rig2 = rig[i * 2 * ny + j * 2 + 1];
float gg = ggg[ij];
float sxxt1ij = (ram1 + 2.0 * rig1) * dxvx[ij] + ram1 * dyvy[ij];
float syyt1ij = (ram1 + 2.0 * rig1) * dyvy[ij] + ram1 * dxvx[ij];
float sxyt1ij = rig2 * (dxvy[ij] + dyvx[ij]);
sxx[ij] = sxx[ij] * gg + dt * sxxt1ij;
syy[ij] = syy[ij] * gg + dt * syyt1ij;
sxy[ij] = sxy[ij] * gg + dt * sxyt1ij;
if(j == na) syy[i * ny + na] = 0.0;
}
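// Note: vxyuxy below is the momentum step,
// vx += dt*(dxsxx + dysxy + fx)/den and vy += dt*(dxsxy + dysyy + fy)/den,
// with the source terms fx, fy active only while t < ftmax; the displacements
// ux, uy are then accumulated from the velocities, all scaled by the taper gg.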
__global__ void vxyuxy(float *vx, float *vy, float *ux, float *uy,
const float *dxsxx, const float *dxsxy, const float *dysxy, const float *dysyy,
const float *ggg, const float *den, const float t, const float ftmax,
const int nx, const int ny, const float dx, const float dy, const float dt,
const float t0, const float at){
devij(nx, ny);
float gg = ggg[ij];
float denvx = den[i * 2 * ny + j * 2];
float denvy = den[(i * 2 + 1) * ny + j * 2 + 1];
float fx1,fy1;
if(t < ftmax){
fx1 = rmxx * psv::fxmxx(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, 0.0, 0.0) +
rmxy * psv::fxmxz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, 0.0, 0.0) +
fxx * psv::fx(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at) +
dpxx * psv::exforce(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at);
fy1 = rmyx * psv::fzmxz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, -dx/2, -dy/2) +
rmyy * psv::fzmzz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, -dx/2, -dy/2) +
fzz * psv::fz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at) +
dpzz * psv::ezforce(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at);
}
else{
fx1 = 0.0;
fy1 = 0.0;
}
float uxt2ij = (dxsxx[ij] + dysxy[ij] + fx1) / denvx;
float uyt2ij = (dxsxy[ij] + dysyy[ij] + fy1) / denvy;
vx[ij] = vx[ij] * gg + dt * uxt2ij;
vy[ij] = vy[ij] * gg + dt * uyt2ij;
ux[ij] = ux[ij] * gg + dt * vx[ij];
uy[ij] = uy[ij] * gg + dt * vy[ij];
}
__global__ void uxyall(float *uxall, float *uyall, const float *ux, const float *uy,
const int *istx, const int *isty, const int it1, const int ntskp, const int ny){
int ns = blockIdx.x;
int isx = istx[ns]-1;
int isy = isty[ns]-1;
if(threadIdx.x){
uxall[ns * ntskp + it1] = ux[isx * ny + isy];
}
else{
uyall[ns * ntskp + it1] = uy[isx * ny + isy];
}
}
__global__ void ups(float *up, float *us, const float *dxux, const float *dyuy,
const float *dxuy, const float *dyux, const int nx, const int ny){
devij(nx, ny);
up[ij] = dxux[ij] + dyuy[ij];
us[ij] = dxuy[ij] - dyux[ij];
}
void query(){
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
for (int i = 0; i < devCount; ++i){
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", (unsigned int)devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", (unsigned int)devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", (unsigned int)devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", (unsigned int)devProp.totalConstMem);
printf("Texture alignment: %u\n", (unsigned int)devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}
}
void forward(const char *oname, const char *wname, const int output){
// dimension
float *sxx = mat::create(nx * ny), *sxy = mat::create(nx * ny), *syy = mat::create(nx * ny);
float *den = mat::create(nx2 * ny2), *rig = mat::create(nx2 * ny2), *lam = mat::create(nx2 * ny2);
float *ux = mat::create(nx * ny), *uy = mat::create(nx * ny);
float *vx = mat::create(nx * ny), *vy = mat::create(nx * ny);
float *up = mat::create(nx * ny), *us = mat::create(nx * ny);
float *dxux = mat::create(nx * ny), *dxuy = mat::create(nx * ny);
float *dyux = mat::create(nx * ny), *dyuy = mat::create(nx * ny);
float *dxvx = mat::create(nx * ny), *dxvy = mat::create(nx * ny);
float *dyvx = mat::create(nx * ny), *dyvy = mat::create(nx * ny);
float *dxsxx = mat::create(nx * ny), *dxsxy = mat::create(nx * ny);
float *dysxy = mat::create(nx * ny), *dysyy = mat::create(nx * ny);
float *ggg = mat::create(nx * ny);
float *uxall = mat::create(nst * ntskp), *uyall = mat::create(nst * ntskp);
float *nd = mat::create(nx2), *q1d = mat::create(nx2);
float *h_up = mat::create_h(nx * ny), *h_us = mat::create_h(nx * ny);
float *h_uxall = mat::create_h(nst* ntskp), *h_uyall = mat::create_h(nst* ntskp);
int *istx = mat::create_i(nst), *isty = mat::create_i(nst);
// output file
FILE *wfile=fopen(wname,"w");
FILE *ofile=fopen(oname,"w");
// observation points
hipLaunchKernelGGL(( psv::istxy), dim3(nst), dim3(1), 0, 0, istx, isty, na);
// velocity structure
FILE *n4096 = fopen("N4096.dat", "r");
FILE *q14096 = fopen("Q14096.dat", "r");
float *h_nd = mat::create_h(nx2);
float *h_q1d = mat::create_h(nx2);
for(int i = 0; i < nx2; i++){
fscanf(n4096, "%f", &h_nd[i]);
fscanf(q14096, "%f", &h_q1d[i]);
}
fclose(n4096);
fclose(q14096);
mat::copyhd(nd, h_nd, nx2);
mat::copyhd(q1d, h_q1d, nx2);
free(h_nd);
free(h_q1d);
hipLaunchKernelGGL(( psv::rdl), dim3(nx2 * nbt), dim3(ny2 / nbt), 0, 0, rig, den, lam, nd, q1d, nx2, ny2, dy);
// initialize
float ftmax = t0 + at * 2;
mat::set(vx, 0.0, nx, ny);
mat::set(vy, 0.0, nx, ny);
mat::set(ux, 0.0, nx, ny);
mat::set(uy, 0.0, nx, ny);
mat::set(sxx, 0.0, nx, ny);
mat::set(sxy, 0.0, nx, ny);
mat::set(syy, 0.0, nx, ny);
// absorbing boundary confition
float apara = 0.015;
hipLaunchKernelGGL(( psv::gg), dim3(nx * nbt), dim3(ny / nbt), 0, 0, ggg, apara, nx, ny, nxa, nya);
// time step start
int ntw = 0;
int ntt = 0;
clock_t timestart=clock();
for(int it = 0; it < ntmax; it++){
if(it % 500 == 0){
printf("timestep: %d / %d\n", it, ntmax);
}
ntt++;
ntw++;
float t=dt*it;
hipLaunchKernelGGL(( psv::finidxx), dim3(nx * nbt), dim3(ny / nbt), 0, 0, vx, dxvx, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidxy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, vy, dxvy, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidyx), dim3(nx * nbt), dim3(ny / nbt), 0, 0, vx, dyvx, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidyy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, vy, dyvy, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::sxy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, sxx, syy, sxy, lam, rig, ggg, dxvx, dxvy, dyvx, dyvy, nx, ny, dt);
hipLaunchKernelGGL(( psv::finidxy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, sxx, dxsxx, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidxx), dim3(nx * nbt), dim3(ny / nbt), 0, 0, sxy, dxsxy, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidyy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, sxy, dysxy, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidyx), dim3(nx * nbt), dim3(ny / nbt), 0, 0, syy, dysyy, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::vxyuxy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, vx, vy, ux, uy, dxsxx, dxsxy, dysxy, dysyy, ggg, den, t, ftmax, nx, ny, dx, dy, dt, t0, at);
if(ntt == nskip){
// save waveform
ntt = 0;
hipLaunchKernelGGL(( uxyall), dim3(nst), dim3(2), 0, 0, uxall, uyall, ux, uy, istx, isty, (it+1)/nskip, ntskp, ny);
}
if(output && ntw == nwrite){
// write snapshot
ntw = 0;
hipLaunchKernelGGL(( psv::finidxx), dim3(nx * nbt), dim3(ny / nbt), 0, 0, ux, dxux, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidxy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, uy, dxuy, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidyx), dim3(nx * nbt), dim3(ny / nbt), 0, 0, ux, dyux, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::finidyy), dim3(nx * nbt), dim3(ny / nbt), 0, 0, uy, dyuy, nx, ny, dx, dy, dt);
hipLaunchKernelGGL(( psv::ups), dim3(nx * nbt), dim3(ny / nbt), 0, 0, up, us, dxux, dyuy, dxuy, dyux, nx, ny);
mat::write(ofile, up, h_up, nx, ny);
mat::write(ofile, us, h_us, nx, ny);
}
}
{
printf("\ntotal time: %.2fs\n",(float)(clock()-timestart)/CLOCKS_PER_SEC);
size_t free_byte ;
size_t total_byte ;
hipMemGetInfo( &free_byte, &total_byte ) ;
float free_db = (float)free_byte ;
float total_db = (float)total_byte ;
float used_db = total_db - free_db ;
printf("memory usage: %.1fMB / %.1fMB\n", used_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
// write waveform
mat::write(wfile, uxall, h_uxall, nst, ntskp);
mat::write(wfile, uyall, h_uyall, nst, ntskp);
fclose(ofile);
fclose(wfile);
hipFree(sxx); hipFree(sxy); hipFree(syy);
hipFree(den); hipFree(rig); hipFree(lam);
hipFree(ux); hipFree(uy);
hipFree(vx); hipFree(vy);
hipFree(up); hipFree(us);
hipFree(dxux); hipFree(dxuy);
hipFree(dyux); hipFree(dyuy);
hipFree(dxvx); hipFree(dxvy);
hipFree(dyvx); hipFree(dyvy);
hipFree(dxsxx); hipFree(dxsxy);
hipFree(dysxy); hipFree(dysyy);
hipFree(ggg);
hipFree(nd); hipFree(q1d);
hipFree(istx); hipFree(isty);
free(h_up); free(h_us);
free(h_uxall); free(h_uyall);
}
void waveform(const char *wname){
int ndskip = 1;
float dt2 = dt * 10, dx2 = dx * 4;
float *ux = mat::create_h(nst * ntskp), *uz = mat::create_h(nst * ntskp);
FILE *file = fopen(wname,"r");
FILE *filex = fopen("ux", "w");
FILE *filez = fopen("uz", "w");
mat::read(file, ux, nst, ntskp);
mat::read(file, uz, nst, ntskp);
fclose(file);
for(int i = 0; i < nst; i += nsskip){
fprintf(filex, ">\n");
fprintf(filez, ">\n");
for(int j = 0; j < ntskp; j += ndskip){
int ij = i * ntskp + j;
float tm = j*dt2;
float shift = i*dx2;
fprintf(filex, "%f %f\n", tm, ux[ij] * 15.0 + shift);
fprintf(filez, "%f %f\n", tm, uz[ij] * 15.0 + shift);
}
}
}
void snapshot(const char *oname){
FILE *file=fopen(oname,"r");
float *up = mat::create_h(nx * ny), *us = mat::create_h(nx * ny);
float *u = mat::create_h(nx * ny), *p = mat::create_h(nx * ny), *s = mat::create_h(nx * ny);
int n[5]={0,1,2,3,4};
FILE **snapshot = (FILE **)malloc(5*sizeof(FILE *));
*snapshot = fopen("snap1", "w");
*(snapshot + 1) = fopen("snap2", "w");
*(snapshot + 2) = fopen("snap3", "w");
*(snapshot + 3) = fopen("snap4", "w");
*(snapshot + 4) = fopen("snap5", "w");
float pmax, smax, cp, lp ,cs, ls, x, y;
for(int isnap = 0; isnap < nsnap; isnap++){
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
u[i * ny + j] = 0;
}
}
mat::read(file, up, nx, ny);
mat::read(file, us, nx, ny);
pmax=0.0;
smax=0.0;
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
if(pmax < abs(up[ij])){
pmax = abs(up[ij]);
}
if(smax < abs(us[ij])){
smax = abs(us[ij]);
}
}
}
// printf("Pmax=%f Smax=%f\n",pmax,smax);
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
cp=pamp;
lp=0.1*pmax;
if(abs(up[ij]) > cp && up[ij] < 0.0){
up[ij] = -cp;
}
else if(abs(up[ij]) > cp && up[ij] > 0.0){
up[ij] = cp;
}
if(abs(us[ij]) < lp){
up[ij] = 0.0;
}
}
}
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
cs = samp;
ls = 0.1 * smax;
if(abs(us[ij]) > cs && us[ij] < 0.0){
us[ij] = -cs;
}
else if(abs(us[ij]) > cs && us[ij] > 0.0){
us[ij] = cs;
}
if(abs(us[ij]) < ls){
us[ij] = 0.0;
}
}
}
if(isnap == n[0] || isnap == n[1] || isnap == n[2] || isnap == n[3] || isnap == n[4]){
for(int j = 0; j < ny; j++){
for(int i = 0; i < nx; i++){
int ij = i * ny + j;
x = i * dx;
y = j * dy;
p[ij] = up[ij] / pampall;
s[ij] = us[ij] / sampall;
// if(up[i][j]>1e-5||us[i][j]>1e-5){
// printf("%f %f\n", up[i][j],us[i][j]);
// }
}
}
for(int j = 0; j < ny; j++){
for(int i = 0; i < nx; i++){
int ij = i * ny + j;
x = i * dx;
y = j * dy;
if(abs(s[ij]) > abs(p[ij])){
u[ij] = -abs(s[ij]);
}
else if(abs(p[ij]) > abs(s[ij])){
u[ij] = abs(s[ij]);
}
fprintf(*(snapshot+isnap), "%f %f %f\n", x, y, u[ij]);
}
}
}
}
fclose(file);
fclose(*(snapshot));
fclose(*(snapshot+1));
fclose(*(snapshot+2));
fclose(*(snapshot+3));
fclose(*(snapshot+4));
}
}
int main(int argc , char *argv[]){
// command-line options (e.g. "psv.exe fsw". default: f)
// q: gpu device query
// f: forward modeling with waveform output only
// o: forward modeling with waveform and snapshot output (with much more time consumption)
// w: convert output waveform data to gmt readable format
// s: convert output snapshot data to gmt readable format
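// e.g. "psv.exe ow" runs the forward model with snapshots (cfg[1], cfg[2]) and then
// converts the waveform output to gmt format (cfg[3]); "s" post-processes the
// snapshot file written by a previous "o" run.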
int cfg[5] = {0};
if(argc > 1){
for(int i = 0; argv[1][i] != '\0'; i++){
switch(argv[1][i]){
case 'q': cfg[0] = 1;break;
case 'f': cfg[1] = 1; break;
case 'o': cfg[1] = 1; cfg[2] = 1; break;
case 'w': cfg[3] = 1; break;
case 's': cfg[4] = 1; break;
}
}
}
else{
cfg[1] = 1; // documented default ("f"): forward modeling with waveform output
}
// output file name
const char *oname="opsv";
const char *wname="wpsv";
if(cfg[0]) psv::query();
if(cfg[1]) psv::forward(oname, wname, cfg[2]);
if(cfg[3]) psv::waveform(wname);
if(cfg[4]) psv::snapshot(oname);
}
| 57a182057645263e22049e58caf1166ab160af5b.cu | // * -PSM2D-
// * P and SV WAVES
// ************************************************************************
// * Calculating P and SV wavefields in homogeneous half-space for a *
// * point source by the Finite-Difference Method. *
// * **********************************************************************
// * Last modified: May 14, 2017 *
// * Author: Yanbin WANG *
// * Department of Earth and Planetary Sciences *
// * Faculty of Sciences, Kyushu University *
// * Hakozaki 6-10-1, Fukuoka, 812-8581, Japan *
// * Now at: Department of Geophysics, Peking University *
// * 100871, Beijing, China *
// * Modified to staggered-grid scheme on 16 June 2005. *
// * Modified to PSM/FDM hybrid method in February 2006 *
// * by Xing Wei and Yanbin Wang. *
// * Modified for Lanzhou basin on 11 January 2011. *
// * by Yanbin Wang. *
// * Modified to Finite-Difference Method on March, 2016 *
// * by Xueyan Li and Yanbin Wang. *
// * Modified to Cuda C on March, 2017 *
// * by Congyue Cui and Yanbin Wang. *
// ************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// map block and thread index to i and j
#define devij(dimx, dimy) \
int i = blockIdx.x % dimx; \
int j = threadIdx.x + (blockIdx.x - i) / dimx * dimy / d_nbt; \
int ij = i * dimy + j
// parameter
const int nx = 2048, ny = 1024, nx2 = nx * 2, ny2 = ny * 2;
const float dx = 0.0342, dy = 0.0342, dt = 1.0e-3;
const int ntmax = 30000, nwrite = 500;
const float at = 0.1 / 4.0, t0 = at * 2;
const int na = 0;
const int nst = 512, nsskip = nx / nst;
const int nxa = 20, nya = 20;
const int nskip = 10, ntskp = ntmax / nskip + 1;
const int nbt = 8;
// plotting parameter
const int nsnap=60;
const float pamp=0.5,samp=2.2;
const float pampall=0.5,sampall=2.2;
// device parameter
__constant__ int is0 = 292, js0 = 146;
__constant__ float ax = 0.0342, ay = 0.0342;
__constant__ float fxx = 0.0, fyy = 0.0, fzz = 0.0;
__constant__ float dpxx = 0.0, dpyy = 0.0, dpzz = 0.0;
__constant__ float rmxx = 1.0, rmxy = 0.0, rmyy = -1.0, rmyx=0.0;
__constant__ float c0 = 9.0 / 8.0, c1 = 1.0 / 24.0;
__constant__ int d_nbt = 8;
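// Note: d_nbt mirrors the host-side nbt used in every <<<...>>> launch; the devij
// macro splits each column into ny/d_nbt-row chunks, so the two constants must stay equal.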
// matrix related function
namespace mat{
float *create(const int m) {
// create floating-point device array
float *a;
cudaMalloc((void**)&a, m * sizeof(float));
return a;
}
float *create_h(const int m) {
// create floating-point host array
return (float *)malloc(m * sizeof(float));
}
int *create_i(const int m){
// create integer device array
int *a;
cudaMalloc((void**)&a, m * sizeof(int));
return a;
}
__global__ void set_d(float *a, const float init, const int m, const int n){
devij(m, n);
a[ij] = init;
}
void set(float *a, const float init, const int m, const int n){
// initialize the value of a device matrix
mat::set_d<<<m * nbt, n / nbt>>>(a, init, m, n);
}
void copyhd(float *d_a, const float *a, const int m){
// copy memory from host(a) to device(d_a)
cudaMemcpy(d_a, a , m * sizeof(float), cudaMemcpyHostToDevice);
}
void copydh(float *a, const float *d_a, const int m){
// copy memory from device(d_a) to host(a)
cudaMemcpy(a, d_a , m * sizeof(float), cudaMemcpyDeviceToHost);
}
void write(FILE *file, float *d_a, float *a, const int nx, const int ny){
// write matrix data to file
mat::copydh(a, d_a, nx * ny);
for(int i= 0; i < nx; i++){
for(int j = 0; j < ny; j++){
fprintf(file,"%f\n", a[i * ny + j]);
}
}
}
void read(FILE *file, float *a, const int nx, const int ny){
// read matrix data from file
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
fscanf(file, "%f", a + ij);
}
}
}
}
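// Note: the mat helpers above are thin wrappers around cudaMalloc/cudaMemcpy.
// A typical (sketched) usage pattern in the solver is
// float *d = mat::create(m * n); mat::set(d, 0.0, m, n); ... mat::copydh(h, d, m * n);
// followed by cudaFree(d) for device arrays and free(h) for host buffers.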
// forward related function
namespace psv{
__device__ float dherrman(float a, float x, float x0){
float a2 = 2.0 * a;
float t = x - x0;
float td = (t + a2) / a;
if(t <= -a2) return 0.0;
if(t <= -a) return td / (a2 * a);
if(t <= a) return (-td + 2.0) / (a2 * a);
if(t <= a2) return (td - 4.0) / (a2 * a);
return 0.0;
}
__device__ float herrman(float a, float x, float x0){
float a2 = 2.0*a;
float t = x - x0;
float td = (t + a2)/a;
if(t <= -a2) return 0.0;
if(t <= -a) return (0.5 * td * td) / a2;
if(t <= a) return (-0.5 * td * td + 2.0 * td - 1.0) / a2;
if(t <= a2) return (0.5 * td * td - 4.0 * td + 8.0) / a2;
return 0.0;
}
__device__ float fxmxz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fzmxz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fzmzz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fxmxx(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fx(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float fz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float exforce(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__device__ float ezforce(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__global__ void istxy(int *istx, int *isty, const int na){
int i = blockIdx.x;
istx[i] = i * 4 + 1;
isty[i] = na + 1;
}
__global__ void rdl(float *rig, float *den, float *lam,
const float *nd, const float *q1d,
const int nx2, const int ny2, const float dy){
devij(nx2, ny2);
float depth = j * dy / 2.0;
float vpb, vsb, rob;
float rrigb, rlanb, rdenb;
if(depth <= -q1d[i] / 1000.0){
vpb = 1.70;
vsb = 0.85;
rob = 1.8;
}
else if(depth <= -nd[i] / 1000.0){
vpb = 4.0;
vsb = 2.1;
rob = 2.4;
}
else if(depth <= 15.0){
vpb = 5.8;
vsb = 3.3;
rob = 2.7;
}
else if(depth <= 32.0){
vpb = 6.4;
vsb = 3.6;
rob = 2.85;
}
else{
vpb = 6.9;
vsb = 3.9;
rob = 3.1;
}
rrigb = rob * vsb * vsb;
rlanb = rob * vpb * vpb - 2.0 * rrigb;
rdenb = rob;
if(j < na * 2){
rig[ij] = 0.0;
den[ij] = rdenb;
lam[ij] = 0.0;
}
else{
rig[ij] = rrigb;
den[ij] = rdenb;
lam[ij] = rlanb;
}
}
__global__ void gg(float *ggg, const float apara,
const int nx, const int ny, const int nxa, const int nya){
devij(nx, ny);
if(i + 1 < nxa){
ggg[ij]=exp(-pow(apara * (nxa - i - 1), 2));
}
else if(i + 1 > (nx - nxa + 1)){
ggg[ij]=exp(-pow(apara * (i - nx + nxa), 2));
}
else if(j + 1 > (ny - nya + 1)){
ggg[ij]=exp(-pow(apara * (j - ny + nya), 2));
}
else{
ggg[ij]=1.0;
}
}
__global__ void finidyy(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *ai = a + i * ny;
if(j == 0){
dya[ij] = 1.0 / dy * (c0 * ai[0] - c1 * ai[1]);
}
else if(j == 1){
dya[ij] = 1.0 / dy * (c0 * (ai[1] - ai[0]) - c1 * ai[2]);
}
else if(j == ny - 1){
dya[ij] = 1.0 / dy * (c0 * (ai[ny - 1] - ai[ny - 2]) + c1 * ai[ny - 3]);
}
else{
dya[ij] = 1.0 / dy * (c0 * (ai[j] - ai[j - 1]) - c1 * (ai[j + 1] - ai[j - 2]));
}
}
__global__ void finidyx(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *ai = a + i * ny;
if(j == 0){
dya[ij] = 1.0 / dy * (c0 * (ai[1] - ai[0]) - c1 * ai[2]);
}
else if(j == ny - 2){
dya[ij] = 1.0 / dy * (c0 * (ai[ny - 1] - ai[ny - 2]) + c1 * ai[ny - 3]);
}
else if(j == ny - 1){
dya[ij] = 1.0 / dy * (c0 * (-ai[ny - 1]) + c1 * ai[ny - 2]);
}
else{
dya[ij] = 1.0 / dy * (c0 * (ai[j + 1] - ai[j]) - c1 * (ai[j + 2] - ai[j - 1]));
}
}
__global__ void finidxy(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *aj = a + j;
if(i == 0){
dya[ij] = 1.0 / dx * (c0 * aj[0] - c1 * aj[ny]);
}
else if(i == 1){
dya[ij] = 1.0 / dx * (c0 * (aj[ny] - aj[0]) - c1 * aj[2 * ny]);
}
else if(i == nx - 1){
dya[ij] = 1.0 / dx * (c0 * (aj[(nx - 1) * ny] - aj[(nx - 2) * ny]) + c1 * aj[(nx - 3) * ny]);
}
else{
dya[ij] = 1.0 / dx * (c0 * (aj[i * ny] - aj[(i - 1) * ny]) - c1 * (aj[(i + 1) * ny] - aj[(i - 2) * ny]));
}
}
__global__ void finidxx(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *aj = a + j;
if(i == 0){
dya[ij] = 1.0 / dx * (c0 * (aj[ny] - aj[0]) - c1 * aj[2 * ny]);
}
else if(i == nx - 2){
dya[ij] = 1.0 / dx * (c0 * (aj[(nx - 1) * ny] - aj[(nx - 2) * ny]) + c1 * aj[(nx - 3) * ny]);
}
else if(i == nx - 1){
dya[ij] = 1.0 / dx * (c0 * (-aj[(nx - 1) * ny ]) + c1 * aj[(nx - 2) * ny]);
}
else{
dya[ij] = 1.0 / dx * (c0 * (aj[(i + 1) * ny] - aj[i * ny]) - c1 * (aj[(i + 2) * ny] - aj[(i - 1) * ny]));
}
}
__global__ void sxy(float *sxx, float *syy, float *sxy,
const float *lam, const float *rig, const float *ggg,
const float *dxvx, const float *dxvy, const float *dyvx, const float *dyvy,
const int nx, const int ny, const float dt){
devij(nx, ny);
float ram1 = lam[(i * 2 + 1) * ny + j * 2];
float rig1 = rig[(i * 2 + 1) * ny + j * 2];
float rig2 = rig[i * 2 * ny + j * 2 + 1];
float gg = ggg[ij];
float sxxt1ij = (ram1 + 2.0 * rig1) * dxvx[ij] + ram1 * dyvy[ij];
float syyt1ij = (ram1 + 2.0 * rig1) * dyvy[ij] + ram1 * dxvx[ij];
float sxyt1ij = rig2 * (dxvy[ij] + dyvx[ij]);
sxx[ij] = sxx[ij] * gg + dt * sxxt1ij;
syy[ij] = syy[ij] * gg + dt * syyt1ij;
sxy[ij] = sxy[ij] * gg + dt * sxyt1ij;
if(j == na) syy[i * ny + na] = 0.0;
}
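// vxyuxy: update the velocities from the stress divergence plus the source terms
// (moment-tensor and force contributions, active while t < ftmax), apply the taper,
// then integrate the velocities into the displacements ux, uy.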
__global__ void vxyuxy(float *vx, float *vy, float *ux, float *uy,
const float *dxsxx, const float *dxsxy, const float *dysxy, const float *dysyy,
const float *ggg, const float *den, const float t, const float ftmax,
const int nx, const int ny, const float dx, const float dy, const float dt,
const float t0, const float at){
devij(nx, ny);
float gg = ggg[ij];
float denvx = den[i * 2 * ny + j * 2];
float denvy = den[(i * 2 + 1) * ny + j * 2 + 1];
float fx1,fy1;
if(t < ftmax){
fx1 = rmxx * psv::fxmxx(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, 0.0, 0.0) +
rmxy * psv::fxmxz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, 0.0, 0.0) +
fxx * psv::fx(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at) +
dpxx * psv::exforce(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at);
fy1 = rmyx * psv::fzmxz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, -dx/2, -dy/2) +
rmyy * psv::fzmzz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, -dx/2, -dy/2) +
fzz * psv::fz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at) +
dpzz * psv::ezforce(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at);
}
else{
fx1 = 0.0;
fy1 = 0.0;
}
float uxt2ij = (dxsxx[ij] + dysxy[ij] + fx1) / denvx;
float uyt2ij = (dxsxy[ij] + dysyy[ij] + fy1) / denvy;
vx[ij] = vx[ij] * gg + dt * uxt2ij;
vy[ij] = vy[ij] * gg + dt * uyt2ij;
ux[ij] = ux[ij] * gg + dt * vx[ij];
uy[ij] = uy[ij] * gg + dt * vy[ij];
}
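// uxyall: one block per receiver; thread 1 stores ux and thread 0 stores uy at the
// receiver cell (istx, isty) for time index it1.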
__global__ void uxyall(float *uxall, float *uyall, const float *ux, const float *uy,
const int *istx, const int *isty, const int it1, const int ntskp, const int ny){
int ns = blockIdx.x;
int isx = istx[ns]-1;
int isy = isty[ns]-1;
if(threadIdx.x){
uxall[ns * ntskp + it1] = ux[isx * ny + isy];
}
else{
uyall[ns * ntskp + it1] = uy[isx * ny + isy];
}
}
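// ups: divergence (P) and curl (S) of the displacement field for the snapshot output.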
__global__ void ups(float *up, float *us, const float *dxux, const float *dyuy,
const float *dxuy, const float *dyux, const int nx, const int ny){
devij(nx, ny);
up[ij] = dxux[ij] + dyuy[ij];
us[ij] = dxuy[ij] - dyux[ij];
}
void query(){
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
for (int i = 0; i < devCount; ++i){
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", (unsigned int)devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", (unsigned int)devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", (unsigned int)devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", (unsigned int)devProp.totalConstMem);
printf("Texture alignment: %u\n", (unsigned int)devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}
}
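// forward: run the 2-D P-SV finite-difference simulation; receiver waveforms go to wname,
// and snapshots go to oname when output != 0.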
void forward(const char *oname, const char *wname, const int output){
// dimension
float *sxx = mat::create(nx * ny), *sxy = mat::create(nx * ny), *syy = mat::create(nx * ny);
float *den = mat::create(nx2 * ny2), *rig = mat::create(nx2 * ny2), *lam = mat::create(nx2 * ny2);
float *ux = mat::create(nx * ny), *uy = mat::create(nx * ny);
float *vx = mat::create(nx * ny), *vy = mat::create(nx * ny);
float *up = mat::create(nx * ny), *us = mat::create(nx * ny);
float *dxux = mat::create(nx * ny), *dxuy = mat::create(nx * ny);
float *dyux = mat::create(nx * ny), *dyuy = mat::create(nx * ny);
float *dxvx = mat::create(nx * ny), *dxvy = mat::create(nx * ny);
float *dyvx = mat::create(nx * ny), *dyvy = mat::create(nx * ny);
float *dxsxx = mat::create(nx * ny), *dxsxy = mat::create(nx * ny);
float *dysxy = mat::create(nx * ny), *dysyy = mat::create(nx * ny);
float *ggg = mat::create(nx * ny);
float *uxall = mat::create(nst * ntskp), *uyall = mat::create(nst * ntskp);
float *nd = mat::create(nx2), *q1d = mat::create(nx2);
float *h_up = mat::create_h(nx * ny), *h_us = mat::create_h(nx * ny);
float *h_uxall = mat::create_h(nst* ntskp), *h_uyall = mat::create_h(nst* ntskp);
int *istx = mat::create_i(nst), *isty = mat::create_i(nst);
// output file
FILE *wfile=fopen(wname,"w");
FILE *ofile=fopen(oname,"w");
// observation points
psv::istxy<<<nst, 1>>>(istx, isty, na);
// velocity structure
FILE *n4096 = fopen("N4096.dat", "r");
FILE *q14096 = fopen("Q14096.dat", "r");
float *h_nd = mat::create_h(nx2);
float *h_q1d = mat::create_h(nx2);
for(int i = 0; i < nx2; i++){
fscanf(n4096, "%f", &h_nd[i]);
fscanf(q14096, "%f", &h_q1d[i]);
}
fclose(n4096);
fclose(q14096);
mat::copyhd(nd, h_nd, nx2);
mat::copyhd(q1d, h_q1d, nx2);
free(h_nd);
free(h_q1d);
psv::rdl<<<nx2 * nbt, ny2 / nbt>>>(rig, den, lam, nd, q1d, nx2, ny2, dy);
// initialize
float ftmax = t0 + at * 2;
mat::set(vx, 0.0, nx, ny);
mat::set(vy, 0.0, nx, ny);
mat::set(ux, 0.0, nx, ny);
mat::set(uy, 0.0, nx, ny);
mat::set(sxx, 0.0, nx, ny);
mat::set(sxy, 0.0, nx, ny);
mat::set(syy, 0.0, nx, ny);
// absorbing boundary condition
float apara = 0.015;
psv::gg<<<nx * nbt, ny / nbt>>>(ggg, apara, nx, ny, nxa, nya);
// time step start
int ntw = 0;
int ntt = 0;
clock_t timestart=clock();
for(int it = 0; it < ntmax; it++){
if(it % 500 == 0){
printf("timestep: %d / %d\n", it, ntmax);
}
ntt++;
ntw++;
float t=dt*it;
psv::finidxx<<<nx * nbt, ny / nbt>>>(vx, dxvx, nx, ny, dx, dy, dt);
psv::finidxy<<<nx * nbt, ny / nbt>>>(vy, dxvy, nx, ny, dx, dy, dt);
psv::finidyx<<<nx * nbt, ny / nbt>>>(vx, dyvx, nx, ny, dx, dy, dt);
psv::finidyy<<<nx * nbt, ny / nbt>>>(vy, dyvy, nx, ny, dx, dy, dt);
psv::sxy<<<nx * nbt, ny / nbt>>>(sxx, syy, sxy, lam, rig, ggg, dxvx, dxvy, dyvx, dyvy, nx, ny, dt);
psv::finidxy<<<nx * nbt, ny / nbt>>>(sxx, dxsxx, nx, ny, dx, dy, dt);
psv::finidxx<<<nx * nbt, ny / nbt>>>(sxy, dxsxy, nx, ny, dx, dy, dt);
psv::finidyy<<<nx * nbt, ny / nbt>>>(sxy, dysxy, nx, ny, dx, dy, dt);
psv::finidyx<<<nx * nbt, ny / nbt>>>(syy, dysyy, nx, ny, dx, dy, dt);
psv::vxyuxy<<<nx * nbt, ny / nbt>>>(vx, vy, ux, uy, dxsxx, dxsxy, dysxy, dysyy, ggg, den, t, ftmax, nx, ny, dx, dy, dt, t0, at);
if(ntt == nskip){
// save waveform
ntt = 0;
uxyall<<<nst, 2>>>(uxall, uyall, ux, uy, istx, isty, (it+1)/nskip, ntskp, ny);
}
if(output && ntw == nwrite){
// write snapshot
ntw = 0;
psv::finidxx<<<nx * nbt, ny / nbt>>>(ux, dxux, nx, ny, dx, dy, dt);
psv::finidxy<<<nx * nbt, ny / nbt>>>(uy, dxuy, nx, ny, dx, dy, dt);
psv::finidyx<<<nx * nbt, ny / nbt>>>(ux, dyux, nx, ny, dx, dy, dt);
psv::finidyy<<<nx * nbt, ny / nbt>>>(uy, dyuy, nx, ny, dx, dy, dt);
psv::ups<<< nx * nbt, ny / nbt>>>(up, us, dxux, dyuy, dxuy, dyux, nx, ny);
mat::write(ofile, up, h_up, nx, ny);
mat::write(ofile, us, h_us, nx, ny);
}
}
{
printf("\ntotal time: %.2fs\n",(float)(clock()-timestart)/CLOCKS_PER_SEC);
size_t free_byte ;
size_t total_byte ;
cudaMemGetInfo( &free_byte, &total_byte ) ;
float free_db = (float)free_byte ;
float total_db = (float)total_byte ;
float used_db = total_db - free_db ;
printf("memory usage: %.1fMB / %.1fMB\n", used_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
// write waveform
mat::write(wfile, uxall, h_uxall, nst, ntskp);
mat::write(wfile, uyall, h_uyall, nst, ntskp);
fclose(ofile);
fclose(wfile);
cudaFree(sxx); cudaFree(sxy); cudaFree(syy);
cudaFree(den); cudaFree(rig); cudaFree(lam);
cudaFree(ux); cudaFree(uy);
cudaFree(vx); cudaFree(vy);
cudaFree(up); cudaFree(us);
cudaFree(dxux); cudaFree(dxuy);
cudaFree(dyux); cudaFree(dyuy);
cudaFree(dxvx); cudaFree(dxvy);
cudaFree(dyvx); cudaFree(dyvy);
cudaFree(dxsxx); cudaFree(dxsxy);
cudaFree(dysxy); cudaFree(dysyy);
cudaFree(ggg);
cudaFree(nd); cudaFree(q1d);
cudaFree(istx); cudaFree(isty);
free(h_up); free(h_us);
free(h_uxall); free(h_uyall);
}
void waveform(const char *wname){
int ndskip = 1;
float dt2 = dt * 10, dx2 = dx * 4;
float *ux = mat::create_h(nst * ntskp), *uz = mat::create_h(nst * ntskp);
FILE *file = fopen(wname,"r");
FILE *filex = fopen("ux", "w");
FILE *filez = fopen("uz", "w");
mat::read(file, ux, nst, ntskp);
mat::read(file, uz, nst, ntskp);
fclose(file);
for(int i = 0; i < nst; i += nsskip){
fprintf(filex, ">\n");
fprintf(filez, ">\n");
for(int j = 0; j < ntskp; j += ndskip){
int ij = i * ntskp + j;
float tm = j*dt2;
float shift = i*dx2;
fprintf(filex, "%f %f\n", tm, ux[ij] * 15.0 + shift);
fprintf(filez, "%f %f\n", tm, uz[ij] * 15.0 + shift);
}
}
}
void snapshot(const char *oname){
FILE *file=fopen(oname,"r");
float *up = mat::create_h(nx * ny), *us = mat::create_h(nx * ny);
float *u = mat::create_h(nx * ny), *p = mat::create_h(nx * ny), *s = mat::create_h(nx * ny);
int n[5]={0,1,2,3,4};
FILE **snapshot = (FILE **)malloc(5*sizeof(FILE *));
*snapshot = fopen("snap1", "w");
*(snapshot + 1) = fopen("snap2", "w");
*(snapshot + 2) = fopen("snap3", "w");
*(snapshot + 3) = fopen("snap4", "w");
*(snapshot + 4) = fopen("snap5", "w");
float pmax, smax, cp, lp ,cs, ls, x, y;
for(int isnap = 0; isnap < nsnap; isnap++){
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
u[i * ny + j] = 0;
}
}
mat::read(file, up, nx, ny);
mat::read(file, us, nx, ny);
pmax=0.0;
smax=0.0;
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
if(pmax < abs(up[ij])){
pmax = abs(up[ij]);
}
if(smax < abs(us[ij])){
smax = abs(us[ij]);
}
}
}
// printf("Pmax=%f Smax=%f\n",pmax,smax);
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
cp=pamp;
lp=0.1*pmax;
if(abs(up[ij]) > cp && up[ij] < 0.0){
up[ij] = -cp;
}
else if(abs(up[ij]) > cp && up[ij] > 0.0){
up[ij] = cp;
}
if(abs(up[ij]) < lp){
up[ij] = 0.0;
}
}
}
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
cs = samp;
ls = 0.1 * smax;
if(abs(us[ij]) > cs && us[ij] < 0.0){
us[ij] = -cs;
}
else if(abs(us[ij]) > cs && us[ij] > 0.0){
us[ij] = cs;
}
if(abs(us[ij]) < ls){
us[ij] = 0.0;
}
}
}
if(isnap == n[0] || isnap == n[1] || isnap == n[2] || isnap == n[3] || isnap == n[4]){
for(int j = 0; j < ny; j++){
for(int i = 0; i < nx; i++){
int ij = i * ny + j;
x = i * dx;
y = j * dy;
p[ij] = up[ij] / pampall;
s[ij] = us[ij] / sampall;
// if(up[i][j]>1e-5||us[i][j]>1e-5){
// printf("%f %f\n", up[i][j],us[i][j]);
// }
}
}
for(int j = 0; j < ny; j++){
for(int i = 0; i < nx; i++){
int ij = i * ny + j;
x = i * dx;
y = j * dy;
if(abs(s[ij]) > abs(p[ij])){
u[ij] = -abs(s[ij]);
}
else if(abs(p[ij]) > abs(s[ij])){
u[ij] = abs(p[ij]);
}
fprintf(*(snapshot+isnap), "%f %f %f\n", x, y, u[ij]);
}
}
}
}
fclose(file);
fclose(*(snapshot));
fclose(*(snapshot+1));
fclose(*(snapshot+2));
fclose(*(snapshot+3));
fclose(*(snapshot+4));
}
}
int main(int argc , char *argv[]){
// command-line options (e.g. "psv.exe fsw". default: f)
// q: gpu device query
// f: forward modeling with waveform output only
// o: forward modeling with waveform and snapshot output (with much more time consumption)
// w: convert output waveform data to gmt readable format
// s: convert output snapshot data to gmt readable format
int cfg[5] = {0};
if(argc > 1){
for(int i = 0; argv[1][i] != '\0'; i++){
switch(argv[1][i]){
case 'q': cfg[0] = 1;break;
case 'f': cfg[1] = 1; break;
case 'o': cfg[1] = 1; cfg[2] = 1; break;
case 'w': cfg[3] = 1; break;
case 's': cfg[4] = 1; break;
}
}
}
else{
// no option given: default to forward modeling ("f")
cfg[1] = 1;
}
// output file name
const char *oname="opsv";
const char *wname="wpsv";
if(cfg[0]) psv::query();
if(cfg[1]) psv::forward(oname, wname, cfg[2]);
if(cfg[3]) psv::waveform(wname);
if(cfg[4]) psv::snapshot(oname);
}
|
237833cedb70d74efed1bbbe199b6bad8153292d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int i = threadIdx.x;
float f = d_in[i];
d_out[i] = f*f*f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 237833cedb70d74efed1bbbe199b6bad8153292d.cu | #include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int i = threadIdx.x;
float f = d_in[i];
d_out[i] = f*f*f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
0c5201109f3afe7c6d09f36c03675fa490f9e18a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "noNAsPmccMeans.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nRows = 1;
int nCols = 1;
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *means = NULL;
hipMalloc(&means, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
noNAsPmccMeans), dim3(gridBlock),dim3(threadBlock), 0, 0, nRows,nCols,a,means);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
noNAsPmccMeans), dim3(gridBlock),dim3(threadBlock), 0, 0, nRows,nCols,a,means);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
noNAsPmccMeans), dim3(gridBlock),dim3(threadBlock), 0, 0, nRows,nCols,a,means);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0c5201109f3afe7c6d09f36c03675fa490f9e18a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "noNAsPmccMeans.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nRows = 1;
int nCols = 1;
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *means = NULL;
cudaMalloc(&means, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
noNAsPmccMeans<<<gridBlock,threadBlock>>>(nRows,nCols,a,means);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
noNAsPmccMeans<<<gridBlock,threadBlock>>>(nRows,nCols,a,means);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
noNAsPmccMeans<<<gridBlock,threadBlock>>>(nRows,nCols,a,means);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6f23b716335aeb7da7883b3dd34df13ba906737f.hip | // !!! This is a file automatically generated by hipify!!!
/*================================================================
* Display Images with CPU
*----------------------------------------------------------------
* No licence exists.
*
* vmg.c
*
* Copyright (c) 2012 NULL
*
*================================================================*/
#include "Gaussian.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ static void KerGaussianX(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius);
__global__ static void KerGaussianY(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius);
//__global__ static void KerGaussian(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius);
__global__ static void KerMakeCoeff(float *lpCoeff, int radius)
{
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Gaussian
lpCoeff[i] = 2.0 * ::exp(-(float)(i * i) / (float)(2 * radius * radius));
// Lorentzian
// lpCoeff[i] = (256.0 / (1 + i * i));
}
__global__ static void KerGaussianX(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = width * y + x;
float ratio;
UINT32 pixel;
float r, g, b;
int tx;
float totalRatio = 0;
//if(x < radius || x > width - radius)
//{return;}
r = g = b = 0;
totalRatio = 0;
for(int k = -radius; k <= radius; k ++)
{
tx = x + k;
if(tx >= 0 && tx < width)
{
pixel = lpSrc[idx + k];
//pixel = lpSrc[y * width + tx];
ratio = lpCoeff[(int)::abs(k)];
r += (0xFF & (pixel >> 16)) * ratio;
g += (0xFF & (pixel >> 8)) * ratio;
b += (0xFF & (pixel)) * ratio;
totalRatio += ratio;
}
}
r /= totalRatio;
g /= totalRatio;
b /= totalRatio;
lpDst[idx] = ((UINT32)r << 16) | ((UINT32)g << 8) | ((UINT32)b);
}
__global__ static void KerGaussianY(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = width * y + x;
float ratio;
UINT32 pixel;
float r, g, b;
int ty;
float totalRatio = 0;
r = g = b = 0;
totalRatio = 0;
for(int k = -radius; k <= radius; k ++)
{
ty = y + k;
if(ty >= 0 && ty < height)
{
pixel = lpSrc[ty * width + x];
ratio = lpCoeff[::abs(k)];
r += (0xFF & (pixel >> 16)) * ratio;
g += (0xFF & (pixel >> 8)) * ratio;
b += (0xFF & (pixel)) * ratio;
totalRatio += ratio;
}
}
r /= totalRatio;
g /= totalRatio;
b /= totalRatio;
lpDst[idx] = ((UINT32)r << 16) | ((UINT32)g << 8) | ((UINT32)b);
}
//#include <cstdio>
#define BLOCKSIZE 16
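// DCuGaussian: separable blur on device buffers; fills the per-offset weights in d_lpCoeff,
// runs the horizontal pass into d_lpTmp, then the vertical pass into d_lpDst.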
void Imgproc::DCuGaussian(UINT32 *d_lpDst, UINT32 *d_lpSrc, UINT32 *d_lpTmp, float *d_lpCoeff, int width, int height, int radius)
{
dim3 dimThread(BLOCKSIZE, BLOCKSIZE);
dim3 dimBlock((width + BLOCKSIZE - 1) / BLOCKSIZE, (height + BLOCKSIZE - 1) / BLOCKSIZE);
hipLaunchKernelGGL(( ::KerMakeCoeff), dim3(radius), dim3(1), 0, 0, d_lpCoeff, radius);
//printf("%d, %d\n", width, height);
/* Horizontal blurring */
hipLaunchKernelGGL(( ::KerGaussianX), dim3(dimBlock), dim3(dimThread), 0, 0, d_lpTmp, d_lpSrc, d_lpCoeff, width, height, radius);
/* Vertical blurring (writes the result into d_lpDst) */
hipLaunchKernelGGL(( ::KerGaussianY), dim3(dimBlock), dim3(dimThread), 0, 0, d_lpDst, d_lpTmp, d_lpCoeff, width, height, radius);
}
void Imgproc::CuGaussian(UINT32 *lpDst, UINT32 *lpSrc, int width, int height, int radius)
{
UINT32 *d_lpSrc, *d_lpDst;
float *d_lpCoeff;
::hipMalloc((void **)&d_lpCoeff, sizeof(float) * radius);
::hipMalloc((void **)&d_lpSrc, sizeof(UINT32) * width * height);
::hipMalloc((void **)&d_lpDst, sizeof(UINT32) * width * height);
::hipMemcpy(d_lpSrc, lpSrc, sizeof(UINT32) * width * height, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ::KerMakeCoeff), dim3(radius), dim3(1), 0, 0, d_lpCoeff, radius);
dim3 dimThread(BLOCKSIZE, BLOCKSIZE);
dim3 dimBlock((width + BLOCKSIZE - 1) / BLOCKSIZE, (height + BLOCKSIZE - 1) / BLOCKSIZE);
/* Horizontal blurring */
hipLaunchKernelGGL(( ::KerGaussianX), dim3(dimBlock), dim3(dimThread), 0, 0, d_lpDst, d_lpSrc, d_lpCoeff, width, height, radius);
/* Vertical blurring (source array "d_lpSrc" is reused as the destination) */
hipLaunchKernelGGL(( ::KerGaussianY), dim3(dimBlock), dim3(dimThread), 0, 0, d_lpSrc, d_lpDst, d_lpCoeff, width, height, radius);
::hipMemcpy(lpDst, d_lpSrc, sizeof(UINT32) * width * height, hipMemcpyDeviceToHost);
::hipFree(d_lpCoeff);
::hipFree(d_lpSrc);
::hipFree(d_lpDst);
} | 6f23b716335aeb7da7883b3dd34df13ba906737f.cu | /*================================================================
* Display Images with CPU
*----------------------------------------------------------------
* No licence exists.
*
* vmg.c
*
* Copyright (c) 2012 NULL
*
*================================================================*/
#include "Gaussian.h"
#include <cuda.h>
#include <cuda_runtime.h>
__global__ static void KerGaussianX(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius);
__global__ static void KerGaussianY(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius);
//__global__ static void KerGaussian(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius);
__global__ static void KerMakeCoeff(float *lpCoeff, int radius)
{
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Gaussian
lpCoeff[i] = 2.0 * ::exp(-(float)(i * i) / (float)(2 * radius * radius));
// Lorentzian
// lpCoeff[i] = (256.0 / (1 + i * i));
}
__global__ static void KerGaussianX(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = width * y + x;
float ratio;
UINT32 pixel;
float r, g, b;
int tx;
float totalRatio = 0;
//if(x < radius || x > width - radius)
//{return;}
r = g = b = 0;
totalRatio = 0;
for(int k = -radius; k <= radius; k ++)
{
tx = x + k;
if(tx >= 0 && tx < width)
{
pixel = lpSrc[idx + k];
//pixel = lpSrc[y * width + tx];
ratio = lpCoeff[(int)::abs(k)];
r += (0xFF & (pixel >> 16)) * ratio;
g += (0xFF & (pixel >> 8)) * ratio;
b += (0xFF & (pixel)) * ratio;
totalRatio += ratio;
}
}
r /= totalRatio;
g /= totalRatio;
b /= totalRatio;
lpDst[idx] = ((UINT32)r << 16) | ((UINT32)g << 8) | ((UINT32)b);
}
__global__ static void KerGaussianY(UINT32 *lpDst, UINT32 *lpSrc, float *lpCoeff, int width, int height, int radius)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = width * y + x;
float ratio;
UINT32 pixel;
float r, g, b;
int ty;
float totalRatio = 0;
r = g = b = 0;
totalRatio = 0;
for(int k = -radius; k <= radius; k ++)
{
ty = y + k;
if(ty >= 0 && ty < height)
{
pixel = lpSrc[ty * width + x];
ratio = lpCoeff[::abs(k)];
r += (0xFF & (pixel >> 16)) * ratio;
g += (0xFF & (pixel >> 8)) * ratio;
b += (0xFF & (pixel)) * ratio;
totalRatio += ratio;
}
}
r /= totalRatio;
g /= totalRatio;
b /= totalRatio;
lpDst[idx] = ((UINT32)r << 16) | ((UINT32)g << 8) | ((UINT32)b);
}
//#include <cstdio>
#define BLOCKSIZE 16
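// DCuGaussian: separable blur on device buffers; fills the per-offset weights in d_lpCoeff,
// runs the horizontal pass into d_lpTmp, then the vertical pass into d_lpDst.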
void Imgproc::DCuGaussian(UINT32 *d_lpDst, UINT32 *d_lpSrc, UINT32 *d_lpTmp, float *d_lpCoeff, int width, int height, int radius)
{
dim3 dimThread(BLOCKSIZE, BLOCKSIZE);
dim3 dimBlock((width + BLOCKSIZE - 1) / BLOCKSIZE, (height + BLOCKSIZE - 1) / BLOCKSIZE);
::KerMakeCoeff<<<radius, 1>>>(d_lpCoeff, radius);
//printf("%d, %d\n", width, height);
/* Horizontal blurring */
::KerGaussianX<<<dimBlock, dimThread>>>(d_lpTmp, d_lpSrc, d_lpCoeff, width, height, radius);
/* Vertical blurring (writes the result into d_lpDst) */
::KerGaussianY<<<dimBlock, dimThread>>>(d_lpDst, d_lpTmp, d_lpCoeff, width, height, radius);
}
void Imgproc::CuGaussian(UINT32 *lpDst, UINT32 *lpSrc, int width, int height, int radius)
{
UINT32 *d_lpSrc, *d_lpDst;
float *d_lpCoeff;
::cudaMalloc((void **)&d_lpCoeff, sizeof(float) * radius);
::cudaMalloc((void **)&d_lpSrc, sizeof(UINT32) * width * height);
::cudaMalloc((void **)&d_lpDst, sizeof(UINT32) * width * height);
::cudaMemcpy(d_lpSrc, lpSrc, sizeof(UINT32) * width * height, cudaMemcpyHostToDevice);
::KerMakeCoeff<<<radius, 1>>>(d_lpCoeff, radius);
dim3 dimThread(BLOCKSIZE, BLOCKSIZE);
dim3 dimBlock((width + BLOCKSIZE - 1) / BLOCKSIZE, (height + BLOCKSIZE - 1) / BLOCKSIZE);
/* Horizontal blurring */
::KerGaussianX<<<dimBlock, dimThread>>>(d_lpDst, d_lpSrc, d_lpCoeff, width, height, radius);
/* Vertical blurring (source array "d_lpSrc" is reused as the destination) */
::KerGaussianY<<<dimBlock, dimThread>>>(d_lpSrc, d_lpDst, d_lpCoeff, width, height, radius);
::cudaMemcpy(lpDst, d_lpSrc, sizeof(UINT32) * width * height, cudaMemcpyDeviceToHost);
::cudaFree(d_lpCoeff);
::cudaFree(d_lpSrc);
::cudaFree(d_lpDst);
} |
6a9412bf2f13756a9204b1877e1a847f05ba4b91.hip | // !!! This is a file automatically generated by hipify!!!
#include "THZCStorage.h"
#include "THZCGeneral.h"
#include "THZCGeneral.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
// #include <thrust/complex.h>
// typedef thrust::complex<float> ccx;
// #include <complex.h>
// #define cx float _Complex
void THZCudaStorage_fill(THCState *state, THZCudaStorage *self, cx value)
{
thrust::device_ptr<ccx> self_data((ccx*)self->data);
thrust::fill(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+self->size, ccx(crealf(value),cimagf(value)));
}
void THZCudaStorage_resize(THCState *state, THZCudaStorage *self, long size)
{
THArgCheck(size >= 0, 2, "invalid size");
if(!(self->flag & TH_STORAGE_RESIZABLE))
return;
if(size == 0)
{
if(self->flag & TH_STORAGE_FREEMEM) {
THZCudaCheck(THZCudaFree(state, self->data));
THZCHeapUpdate(state, -self->size * sizeof(cux));
}
self->data = NULL;
self->size = 0;
}
else
{
cux *data = NULL;
// update heap *before* attempting malloc, to free space for the malloc
THZCHeapUpdate(state, size * sizeof(cux));
hipError_t err = THZCudaMalloc(state, (void**)(&data), size * sizeof(cux));
if(err != hipSuccess) {
THZCHeapUpdate(state, -size * sizeof(cux));
}
THZCudaCheck(err);
if (self->data) {
THZCudaCheck(hipMemcpyAsync(data,
self->data,
THMin(self->size, size) * sizeof(cux),
hipMemcpyDeviceToDevice,
THCState_getCurrentStream(state)));
THZCudaCheck(THZCudaFree(state, self->data));
THZCHeapUpdate(state, -self->size * sizeof(cux));
}
self->data = data;
self->size = size;
}
}
| 6a9412bf2f13756a9204b1877e1a847f05ba4b91.cu | #include "THZCStorage.h"
#include "THZCGeneral.h"
#include "THZCGeneral.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
// #include <thrust/complex.h>
// typedef thrust::complex<float> ccx;
// #include <complex.h>
// #define cx float _Complex
void THZCudaStorage_fill(THCState *state, THZCudaStorage *self, cx value)
{
thrust::device_ptr<ccx> self_data((ccx*)self->data);
thrust::fill(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+self->size, ccx(crealf(value),cimagf(value)));
}
void THZCudaStorage_resize(THCState *state, THZCudaStorage *self, long size)
{
THArgCheck(size >= 0, 2, "invalid size");
if(!(self->flag & TH_STORAGE_RESIZABLE))
return;
if(size == 0)
{
if(self->flag & TH_STORAGE_FREEMEM) {
THZCudaCheck(THZCudaFree(state, self->data));
THZCHeapUpdate(state, -self->size * sizeof(cux));
}
self->data = NULL;
self->size = 0;
}
else
{
cux *data = NULL;
// update heap *before* attempting malloc, to free space for the malloc
THZCHeapUpdate(state, size * sizeof(cux));
cudaError_t err = THZCudaMalloc(state, (void**)(&data), size * sizeof(cux));
if(err != cudaSuccess) {
THZCHeapUpdate(state, -size * sizeof(cux));
}
THZCudaCheck(err);
if (self->data) {
THZCudaCheck(cudaMemcpyAsync(data,
self->data,
THMin(self->size, size) * sizeof(cux),
cudaMemcpyDeviceToDevice,
THCState_getCurrentStream(state)));
THZCudaCheck(THZCudaFree(state, self->data));
THZCHeapUpdate(state, -self->size * sizeof(cux));
}
self->data = data;
self->size = size;
}
}
|
0937cccc66f9cd02bb9cec5f40490c36d1711108.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
//--------------------------------------------------------------------
//A : m x l
//B : l x n
//C : m x n (C=A*B)
//--------------------------------------------------------------------
void host_mm(float* C, float* A, float* B, int m, int n, int l){
for(int i=0; i<m; i++)
for(int j=0; j<n; j++)
{
float s=0;
for (int k=0; k<l; k++)
{
float a = A[i*l + k];
float b = B[k*n + j];
s += a * b;
}
C[i*n + j] = s;
}
}
//--------------------------------------------------------------------
__global__ void gpu_mm(float* C, float* A, float* B, int m, int n, int l){
//// 2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
// Pvalue is used to store the element of the matrix
// that is computed by the thread
float Pvalue = 0;
for (int k = 0; k < l; ++k)
{
float Aelement = A[ty * l + k];
float Belement = B[k * n + tx];
Pvalue += Aelement * Belement;
}
// Write the matrix to device memory;
// each thread writes one element
C[ty * n + tx] = Pvalue;
//printf("threadIdx.x=%d\n", threadIdx.x);
}
//----------------------------------------------
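// diff: relative L2 error between a and b, sqrt(sum((a-b)^2) / sum(a^2))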
double diff(float* a, float* b, int n){
double s=0, r=0;
for(int k=0; k<n; k++)
{
double w=a[k]-b[k];
s+=w*w;
r+=a[k]*a[k];
}
return sqrt(s/r);
}
void random(float* a, int n){
for(int k=0; k<n; k++){
a[k]=(float)rand()/RAND_MAX*2-1;
}
}
//----------------------------------------------
void testMatrix(int m, int n, int l)
{
//initialize
float *a = (float*)malloc(sizeof(float)*m*l);
float *b = (float*)malloc(sizeof(float)*l*n);
float *c1 = (float*)malloc(sizeof(float)*m*n);
float *c2 = (float*)malloc(sizeof(float)*m*n);
srand(time(0));
random(a,m*l);
random(b,l*n);
memset(c1, 0, sizeof(float)*m*n);
memset(c2, 0, sizeof(float)*m*n);
float *ga,*gb,*gc;
hipMalloc((void**)&ga, m*l*sizeof(float));
hipMalloc((void**)&gb, l*n*sizeof(float));
hipMalloc((void**)&gc, m*n*sizeof(float));
hipMemcpy(ga, a, m*l*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gb, b, l*n*sizeof(float), hipMemcpyHostToDevice);
hipMemset(gc, 0, m*n*sizeof(float));
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//SBMT(Single Block, Multiple Threads)
hipLaunchKernelGGL(( gpu_mm), dim3(dim3(1,1,1)), dim3(dim3(m, n, 1)), 0, 0, gc,ga,gb,m,n,l);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(c2, gc, m*n*sizeof(float), hipMemcpyDeviceToHost);
double c_start,c_stop;
double CPU_execution_time;
c_start = (double)clock();
host_mm(c1, a, b, m, n, l);
c_stop = (double)clock();
CPU_execution_time = (c_stop - c_start)/(double)CLOCKS_PER_SEC;
//check precision
double err=diff(c1,c2,m*n);
printf("err = %g\n", err);
printf(" ======== (Execution Infomation) ========\n");
printf(" Excuetion Time on GPU: %3.20f s\n",elapsedTime/1000);
printf(" Excuetion Time on CPU: %3.20f s\n",CPU_execution_time);
printf(" Speed up = %f\n",(CPU_execution_time/(elapsedTime/1000)));
printf(" ========================================\n\n");
free(a);
free(b);
free(c1);
free(c2);
hipFree(ga);
hipFree(gb);
hipFree(gc);
}
//----------------------------------------------
int main()
{
int m=32;
int n=32;
int l=32;
testMatrix(m,n,l);
return 0;
}
| 0937cccc66f9cd02bb9cec5f40490c36d1711108.cu |
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
//--------------------------------------------------------------------
//A : m x l
//B : l x n
//C : m x n (C=A*B)
//--------------------------------------------------------------------
void host_mm(float* C, float* A, float* B, int m, int n, int l){
for(int i=0; i<m; i++)
for(int j=0; j<n; j++)
{
float s=0;
for (int k=0; k<l; k++)
{
float a = A[i*l + k];
float b = B[k*n + j];
s += a * b;
}
C[i*n + j] = s;
}
}
//--------------------------------------------------------------------
__global__ void gpu_mm(float* C, float* A, float* B, int m, int n, int l){
//// 2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
// Pvalue is used to store the element of the matrix
// that is computed by the thread
float Pvalue = 0;
for (int k = 0; k < l; ++k)
{
float Aelement = A[ty * l + k];
float Belement = B[k * n + tx];
Pvalue += Aelement * Belement;
}
// Write the matrix to device memory;
// each thread writes one element
C[ty * n + tx] = Pvalue;
//printf("threadIdx.x=%d\n", threadIdx.x);
}
//----------------------------------------------
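// diff: relative L2 error between a and b, sqrt(sum((a-b)^2) / sum(a^2))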
double diff(float* a, float* b, int n){
double s=0, r=0;
for(int k=0; k<n; k++)
{
double w=a[k]-b[k];
s+=w*w;
r+=a[k]*a[k];
}
return sqrt(s/r);
}
void random(float* a, int n){
for(int k=0; k<n; k++){
a[k]=(float)rand()/RAND_MAX*2-1;
}
}
//----------------------------------------------
void testMatrix(int m, int n, int l)
{
//initialize
float *a = (float*)malloc(sizeof(float)*m*l);
float *b = (float*)malloc(sizeof(float)*l*n);
float *c1 = (float*)malloc(sizeof(float)*m*n);
float *c2 = (float*)malloc(sizeof(float)*m*n);
srand(time(0));
random(a,m*l);
random(b,l*n);
memset(c1, 0, sizeof(float)*m*n);
memset(c2, 0, sizeof(float)*m*n);
float *ga,*gb,*gc;
cudaMalloc((void**)&ga, m*l*sizeof(float));
cudaMalloc((void**)&gb, l*n*sizeof(float));
cudaMalloc((void**)&gc, m*n*sizeof(float));
cudaMemcpy(ga, a, m*l*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gb, b, l*n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(gc, 0, m*n*sizeof(float));
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//SBMT(Single Block, Multiple Threads)
gpu_mm<<<dim3(1,1,1), dim3(m, n, 1)>>> (gc,ga,gb,m,n,l);
cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(c2, gc, m*n*sizeof(float), cudaMemcpyDeviceToHost);
double c_start,c_stop;
double CPU_execution_time;
c_start = (double)clock();
host_mm(c1, a, b, m, n, l);
c_stop = (double)clock();
CPU_execution_time = (c_stop - c_start)/(double)CLOCKS_PER_SEC;
//check precision
double err=diff(c1,c2,m*n);
printf("err = %g\n", err);
printf(" ======== (Execution Infomation) ========\n");
printf(" Excuetion Time on GPU: %3.20f s\n",elapsedTime/1000);
printf(" Excuetion Time on CPU: %3.20f s\n",CPU_execution_time);
printf(" Speed up = %f\n",(CPU_execution_time/(elapsedTime/1000)));
printf(" ========================================\n\n");
free(a);
free(b);
free(c1);
free(c2);
cudaFree(ga);
cudaFree(gb);
cudaFree(gc);
}
//----------------------------------------------
int main()
{
int m=32;
int n=32;
int l=32;
testMatrix(m,n,l);
return 0;
}
|
029177ed88e8f7af5924331dbde8fe9d68792eff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************************
***********************************************************************
**********************Coincidencias Similares**************************
***********************************************************************
****************Analizador de un día contra un día*********************
***********************************************************************
***********************************************************************
***********************************************************************
****************************Diego GR***********************************
************************Alejandro Morales******************************
***********************************************************************
*/
/*
cd /home/monitec/Documentos/ProgramasEnC/ExtractorSimilares/
gcc -o a AnalisadorMuestras.c
./a '03-Costa Rica Nacional-20151031000000-50-0000-canal 7-0000-Servidor15-0000-0000.DAT'
./a '01-Costarica05-20151022071837-600-90.3 MHz-7E4885960-100-PCx_10_14-2-MNTC0023.dat'
*
* */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <dirent.h>
#include <time.h>
#define CANTIDAD_MAXIMA_RECONOCIMIENTOS 700
#define NUM_BYTES_FRECUENCY 10
#define NUM_FREC_COM 4
#define LEN_FREC_COM 32
#define NUM_INT_ESCALON 4
#define TAM_ESCALON 128
#define PORCENTAJE_BARRA_MINIMO_PERMITIDO 0.35f
#define PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO 0.8f
#define PUNTOS_ANALISIS_PRIMER_FILTRO 100
#define TAMANO_MINIMO_MUESTRA_SEGUNDOS 10
#define RESTRICCION_CERCANIA_SEGUNDOS 10
#define NUM_BYTES_SHORT 2
const float CONST_MEAN[] = {0.973f,0.93f,0.905f,0.9f,0};
typedef struct{
unsigned int nums[NUM_INT_ESCALON];
char bloqueado;
char vacio;
}entero128;
typedef struct{
unsigned int inicio;
unsigned int duracion;
}reconocimiento;
typedef struct{
unsigned int indice;
float porcentaje;
}coincidencia;
typedef struct{
unsigned int inicio;
unsigned int tamano;
int cantidadCoincidencias;
int cantidadReconocimientos;
coincidencia coincidencias[CANTIDAD_MAXIMA_RECONOCIMIENTOS];
reconocimiento reconocimientos[CANTIDAD_MAXIMA_RECONOCIMIENTOS];
}huella;
unsigned int multiplicadorTiempo = 1;
unsigned int SEGUNDOS_POR_DIA = 86400;
void analizar(const char * pRutaDat);
int fsize(const char *filename);
void leerSuperDat(const char * pPathDatIn, int lenBuffer,unsigned short ** sliceDAT);
void discretizarDAT(const int pLenBuffer, unsigned short **pSliceDAT, entero128 *pMatrizOUT);
void extraerMuestra(int pIndiceInicio, const int pTamano, entero128 *pMatrizOUT_DAT, huella *pHuella_Muestra);
void revisarHuella(int pIndiceInicio, const int pLenMatriz, entero128 *pMatrizOUT_DAT, huella * pHuella_Muestra, const int pLimite, const int pAvance, const float pPORCENTAJE);
int main( int arc, char **argv ){
analizar(argv[1]);
return 0;
}
void analizar(const char * pRutaDat){
int lenBuffer = fsize(pRutaDat)/NUM_BYTES_FRECUENCY;
if(lenBuffer <= 126000){
SEGUNDOS_POR_DIA = 630;
}
int vEscalonAnalisis = 0, vIndMatriz, vTamMuestraDAT, vTamMuestraMatriz, vCantidadHuellas = 0, vIndCoincidencias = 0;
multiplicadorTiempo = lenBuffer/SEGUNDOS_POR_DIA;
unsigned short **sliceDAT;
sliceDAT = (unsigned short **)malloc(NUM_FREC_COM*sizeof(short*));
int vIndFrecuency;
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
sliceDAT[vIndFrecuency] = (unsigned short *)malloc(lenBuffer*sizeof(short));
}
leerSuperDat(pRutaDat, lenBuffer, sliceDAT);
int vlenMatriz = lenBuffer - LEN_FREC_COM;
entero128 * vMatrizOUT;
vMatrizOUT = (entero128 *)malloc(vlenMatriz*sizeof(entero128));
for(vIndMatriz = 0; vIndMatriz < vlenMatriz; vIndMatriz ++){
for(vEscalonAnalisis = 0; vEscalonAnalisis < NUM_INT_ESCALON; vEscalonAnalisis ++){
vMatrizOUT[vIndMatriz].nums[vEscalonAnalisis] = 15000;
}
}
discretizarDAT(lenBuffer, sliceDAT, vMatrizOUT);
vTamMuestraDAT = TAMANO_MINIMO_MUESTRA_SEGUNDOS * multiplicadorTiempo;
vTamMuestraMatriz = vTamMuestraDAT - LEN_FREC_COM;
huella * listaHuellas = (huella *)malloc((vlenMatriz/vTamMuestraDAT + 1) * sizeof(huella));
vIndMatriz = 655200;
clock_t startC = clock();
while(vIndMatriz < vlenMatriz - vTamMuestraDAT){
//printf("IND %d - %d , %d , %d\n", vIndMatriz,vlenMatriz - vTamMuestraMatriz,vTamMuestraMatriz, multiplicadorTiempo);
clock_t startComparador = clock();
/*if(listaHuellas[vCantidadHuellas].matriz == NULL)
listaHuellas[vCantidadHuellas].matriz = (entero128 *)malloc(vTamMuestraMatriz*sizeof(entero128));
*/
listaHuellas[vCantidadHuellas].tamano = vTamMuestraDAT;
listaHuellas[vCantidadHuellas].inicio = vIndMatriz;
const int vLimite = TAM_ESCALON*PORCENTAJE_BARRA_MINIMO_PERMITIDO*PUNTOS_ANALISIS_PRIMER_FILTRO;
int vAvance = vTamMuestraDAT/PUNTOS_ANALISIS_PRIMER_FILTRO;
if(vAvance == 0)
vAvance = 1;
revisarHuella(vIndMatriz, vlenMatriz - 12000, vMatrizOUT, &listaHuellas[vCantidadHuellas],vLimite,vAvance,PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO );
//printf("AFUERA -- Cantidad Coincidencias %d \n", listaHuellas[vCantidadHuellas].cantidadCoincidencias);
//revisarHuella(vIndMatriz , vlenMatriz, vMatrizOUT, &listaHuellas[vCantidadHuellas]);
if(listaHuellas[vCantidadHuellas].cantidadCoincidencias > 0){
printf("***************************************************************\nTiempo huella %d/%d = %f\n",vIndMatriz/multiplicadorTiempo,vlenMatriz/multiplicadorTiempo, ((double)clock() - startComparador)/CLOCKS_PER_SEC);
printf("Cantidad Coincidencias %d \n", listaHuellas[vCantidadHuellas].cantidadCoincidencias);
float tSeg = listaHuellas[vCantidadHuellas].inicio/multiplicadorTiempo;
float H_hora = tSeg/3600.0f;
float H_mins = (H_hora - (int)H_hora)*60;
float H_segs = (H_mins - (int)H_mins)*60;
for(vIndCoincidencias = 0; vIndCoincidencias < listaHuellas[vCantidadHuellas].cantidadCoincidencias; vIndCoincidencias ++){
float tSegC = listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].indice/multiplicadorTiempo;
float C_hora = tSegC/3600.0f;
float C_mins = (C_hora - (int)C_hora)*60;
float C_segs = (C_mins - (int)C_mins)*60;
printf("Huella %d/%d\t%d\t%d -> %d\t%d:%d:%d\t%d -> %d\t%d:%d:%d\t%f\n",vCantidadHuellas,vlenMatriz/vTamMuestraMatriz ,vIndCoincidencias,
listaHuellas[vCantidadHuellas].inicio,
listaHuellas[vCantidadHuellas].inicio/multiplicadorTiempo,
(int)H_hora,(int)H_mins,(int)H_segs,
listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].indice,
listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].indice/multiplicadorTiempo,
(int)C_hora,(int)C_mins,(int)C_segs,
listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].porcentaje);
}
vCantidadHuellas++;
//HACER CRECER LA MUESTRA
}else{
int indMatriz;
for(indMatriz = 0; indMatriz < vTamMuestraMatriz; indMatriz ++){
if(!vMatrizOUT[vIndMatriz].vacio)
vMatrizOUT[vIndMatriz + indMatriz].bloqueado = 0;
}
}
//break;
vIndMatriz += vTamMuestraDAT;
}
int i;
for(i = 0 ; i < vCantidadHuellas;i++ ){
printf("Indice %d Coincidencias %d\t", i, listaHuellas[i].cantidadCoincidencias);
}
printf("\nTAM %d %d\n", lenBuffer, vCantidadHuellas);
}
/*
*fsize: indica el tamaño en bytes de un archivo.
* IN: nombre del archivo (const char *)
* OUT: tamaño en bytes del archivo (int)
*/
int fsize(const char *filename) {
struct stat st;
if (stat(filename, &st) == 0)
return st.st_size;
return -1;
}
/*
leerSuperDat: lee las primeras cuatro frecuencias de un archivo DAT
IN:
** pPathDatIn-> ruta del archivo DAT (char *)
** lenBuffer-> tamao de cada frecuencia (int)
** sliceDAT-> variable de salida, matriz donde se copian la informacion de cada frecuencia (unsigned short **)
*/
void leerSuperDat(const char* pPathDatIn, int lenBuffer,unsigned short ** sliceDAT){
FILE *vArchivo;
int vIndFrecuency;
vArchivo = fopen(pPathDatIn,"rb");
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
fread(sliceDAT[vIndFrecuency],NUM_BYTES_SHORT,lenBuffer,vArchivo);
}
fclose(vArchivo);
}
/*
discretizarDAT: discretiza las primeras cuatro frecuencias del DAT en escalones de 128 bits
IN:
** pLenBuffer-> tamaño de cada frecuencia (int)
** pSliceDAT-> informacion de cada frecuencia del dat(unsigned short **)
** pMatrizOUT-> variable de salida, matriz donde se almacenan los escalones (unsigned int **)
*/
void discretizarDAT(const int pLenBuffer, unsigned short **pSliceDAT, entero128 *pMatrizOUT){
const int vLenMatriz = pLenBuffer - LEN_FREC_COM;
unsigned int vIndCantFrecu = 0;
int vElemIniEscalon = 0;
while(vElemIniEscalon < vLenMatriz){
//printf("ANTES DEL FOR %d - %d\n", vElemIniEscalon , vLenMatriz);
//unsigned int vEscalon[NUM_FREC_COM] = {0};
for(vIndCantFrecu = 0; vIndCantFrecu < NUM_FREC_COM;vIndCantFrecu ++){
unsigned short vArray32Frecu[LEN_FREC_COM] = {0};
unsigned short vAverageBlock = 0;
unsigned int vSumValuBlock = 0;
int vIndFrecuencyBlock = 0;
short vInd32Block = 0;
for(vInd32Block = 0; vInd32Block < LEN_FREC_COM;vInd32Block ++){
vArray32Frecu[vInd32Block]=pSliceDAT[vIndCantFrecu][vElemIniEscalon + vInd32Block];
vSumValuBlock += vArray32Frecu[vInd32Block];
}
//Discretizar los 32 valores
float value = (((float)vSumValuBlock)/((float)LEN_FREC_COM))*CONST_MEAN[vIndCantFrecu];
vAverageBlock = (short)value;
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] = 0;
for(vIndFrecuencyBlock = 0; vIndFrecuencyBlock < LEN_FREC_COM;vIndFrecuencyBlock++){
if(vArray32Frecu[vIndFrecuencyBlock] > vAverageBlock){
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] <<= 1;
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] |= 0b1;
}
else
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] <<= 1;
}
}
//printf("ANTES DEL MEM %d - %d\n", vElemIniEscalon , vLenMatriz);
//memcpy(pMatrizOUT[vElemIniEscalon].nums,vEscalon,sizeof(vEscalon));
//printf("%d\t",pSliceDAT[0][vElemIniEscalon] );
if(pSliceDAT[0][vElemIniEscalon] == 65535 || pSliceDAT[0][vElemIniEscalon] == 15000|| pSliceDAT[0][vElemIniEscalon] == 39064)
pMatrizOUT[vElemIniEscalon].vacio = 1;
else
pMatrizOUT[vElemIniEscalon].vacio = 0;
pMatrizOUT[vElemIniEscalon].bloqueado = 0;
vElemIniEscalon ++;
}
}
/*
extraerMuestra: Extrae la matriz de escalones de una seccion del DAT
IN:
** pIndiceInicio-> indice de desplazamiento en el dat (int)
** pTamano-> tamaño de la muestra (const int)
** pSliceDAT-> Dat completo (unsigned short **)
** pMatrizOUT-> variable de salida, matriz donde se almacenan los escalones (entero128 *)
void extraerMuestra(int pIndiceInicio, const int pTamano, entero128 *pMatrizOUT_DAT, huella *pHuella_Muestra){
/*unsigned short **sliceMuestra;
sliceMuestra = (unsigned short **)malloc(NUM_FREC_COM*sizeof(short*));
int vIndFrecuency;
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
sliceMuestra[vIndFrecuency] = (unsigned short *)malloc(pTamano*sizeof(short));
memcpy(sliceMuestra[vIndFrecuency], pSliceDAT[vIndFrecuency] + pIndiceInicio, pTamano);
}
discretizarDAT(pTamano, sliceMuestra,pMatrizOUT);
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
free(sliceMuestra[vIndFrecuency]);
}
free(sliceMuestra);
//printf("Copiamdo TAM %d, \n");
memcpy(pHuella_Muestra->matriz, pMatrizOUT_DAT + pIndiceInicio, pTamano*sizeof(entero128));
int indMatriz,vIndEscalon, vDiffHamming;
for(indMatriz = - multiplicadorTiempo; indMatriz < pTamano; indMatriz ++){
if(pIndiceInicio + indMatriz >= 0)
pMatrizOUT_DAT[pIndiceInicio + indMatriz].bloqueado = 1;
}
}
*/
/*
revisarHuella: toma una huella y la compara solo en PUNTOS_ANALISIS_PRIMER_FILTRO cantidad de puntos
IN:
** pIndiceInicio-> inicio del analisis
** pLenMatriz-> tamaño de la matriz de comparación
** pMatrizOUT_DAT-> matriz con escalones del DAT (entero128 *)
** pHuella_Muestra-> huella en analisis (huella)
*/
/*
*
//printf("indices %d %d\n",pIndiceInicio, pLenMatriz);
//const int vLimite = TAM_ESCALON*PORCENTAJE_BARRA_MINIMO_PERMITIDO;
//printf("DESBLOQUEADA %d\n",vIndMatriz);
for(vIndMatriz_Huella = 0; vIndMatriz_Huella < pHuella_Muestra->tamano; vIndMatriz_Huella += vAvance){
if(!pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].bloqueado && !pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].vacio && vIndMatriz + vIndMatriz_Huella !=pHuella_Muestra->inicio + vIndMatriz_Huella ){
vDiffHamming = 0;
for(vIndEscalon = 0; vIndEscalon < NUM_INT_ESCALON; vIndEscalon ++){
vDiffHamming += __builtin_popcount(pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].nums[vIndEscalon] ^ pMatrizOUT_DAT[pHuella_Muestra->inicio + vIndMatriz_Huella].nums[vIndEscalon]);
}
if(vDiffHamming < vLimite){
vCantidadValidas ++;
printf("%d %d\n",vIndMatriz + vIndMatriz_Huella , pHuella_Muestra->inicio + vIndMatriz_Huella );
}
}else{
//printf("BLOQUEADA %d\n",vIndMatriz);
}
}
vPorcentajeSimilitud = (float)vCantidadValidas/PUNTOS_ANALISIS_PRIMER_FILTRO;
if(vPorcentajeSimilitud >= PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO){
printf("Limite %f Porcentaje %f validas %d %d\n", PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO,vPorcentajeSimilitud ,vCantidadValidas, pHuella_Muestra->cantidadCoincidencias);
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].indice = vIndMatriz;
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].porcentaje = vPorcentajeSimilitud;
pHuella_Muestra->cantidadCoincidencias = (pHuella_Muestra->cantidadCoincidencias + 1)%CANTIDAD_MAXIMA_RECONOCIMIENTOS;
vIndMatriz += TAMANO_MINIMO_MUESTRA_SEGUNDOS*multiplicadorTiempo;
}
*/
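// revisarHuella_Cuda: per-offset comparison on the GPU; each thread accumulates the popcount
// (Hamming) distance of NUM_MINI_HUELLAS reference mini-fingerprints against the 128-bit
// blocks at its offset. The num128bitsInt / miniHuella types are not declared in this file
// and are assumed to be defined elsewhere.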
__global__ void revisarHuella_Cuda(int N, num128bitsInt *pMatrizHuella, miniHuella huellas[], int *sumOUT)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
int suma = 0;
int val1, val2, val3, val4;
for (int indMiniHuella = 0; indMiniHuella < NUM_MINI_HUELLAS; indMiniHuella++){
val1 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[0] ^ huellas[indMiniHuella].huella[0]);
val2 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[1] ^ huellas[indMiniHuella].huella[1]);
val3 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[2] ^ huellas[indMiniHuella].huella[2]);
val4 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[3] ^ huellas[indMiniHuella].huella[3]);
suma += val1 + val2 + val3 + val4;
}
sumOUT[idx] = suma;
}
}
void revisarHuella(int pIndiceInicio, const int pLenMatriz, entero128 *pMatrizOUT_DAT, huella * pHuella_Muestra, const int pLimite, const int pAvance, const float pPORCENTAJE){
/*
const int vLimite = TAM_ESCALON*PORCENTAJE_BARRA_MINIMO_PERMITIDO*PUNTOS_ANALISIS_PRIMER_FILTRO;
int vAvance = pHuella_Muestra->tamano/PUNTOS_ANALISIS_PRIMER_FILTRO;
if(vAvance == 0)
vAvance = 1;
*/
int vIndMatriz, vIndMatriz_Huella, vIndEscalon;
pHuella_Muestra->cantidadCoincidencias = 0;
int vDiffHamming;
float vPorcentajeSimilitud;
int vArregloDiffHamming[pLenMatriz];
///
for(vIndMatriz =pIndiceInicio; vIndMatriz < pLenMatriz - pHuella_Muestra->tamano; vIndMatriz ++){
vDiffHamming = 0;
for(vIndMatriz_Huella = 0; vIndMatriz_Huella < pHuella_Muestra->tamano; vIndMatriz_Huella += pAvance){
for(vIndEscalon = 0; vIndEscalon < NUM_INT_ESCALON; vIndEscalon ++){
vDiffHamming += __builtin_popcount(pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].nums[vIndEscalon] ^ pMatrizOUT_DAT[pHuella_Muestra->inicio + vIndMatriz_Huella].nums[vIndEscalon]);
}
}
vArregloDiffHamming[vIndMatriz] = vDiffHamming;
//break;
}
///
//printf("Suma Hamming %d \t Limite %d \n",vDiffHamming, vLimite );
for(vIndMatriz =pIndiceInicio; vIndMatriz < pLenMatriz - pHuella_Muestra->tamano; vIndMatriz ++){
if((vIndMatriz < pHuella_Muestra->inicio - RESTRICCION_CERCANIA_SEGUNDOS*multiplicadorTiempo ||
vIndMatriz > pHuella_Muestra->inicio + RESTRICCION_CERCANIA_SEGUNDOS*multiplicadorTiempo)
&& vIndMatriz !=pHuella_Muestra->inicio ){
if(!pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].bloqueado && !pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].vacio ){
if(vArregloDiffHamming[vIndMatriz] < pLimite){
vPorcentajeSimilitud = 1 - (float)vArregloDiffHamming[vIndMatriz]/(float)(TAM_ESCALON*PUNTOS_ANALISIS_PRIMER_FILTRO);
if( vPorcentajeSimilitud > pPORCENTAJE){
//printf("Limite %f Porcentaje %f validas %d %d\n", PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO,vPorcentajeSimilitud ,vCantidadValidas, pHuella_Muestra->cantidadCoincidencias);
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].indice = vIndMatriz;
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].porcentaje = vPorcentajeSimilitud;
pHuella_Muestra->cantidadCoincidencias = (pHuella_Muestra->cantidadCoincidencias + 1)%CANTIDAD_MAXIMA_RECONOCIMIENTOS;
//vIndMatriz += TAMANO_MINIMO_MUESTRA_SEGUNDOS*multiplicadorTiempo;
}
}
}
}
}
//printf("Coincidencias encontradas = %d \n", pHuella_Muestra->cantidadCoincidencias);
}
| 029177ed88e8f7af5924331dbde8fe9d68792eff.cu | /***********************************************************************
***********************************************************************
**********************Coincidencias Similares**************************
***********************************************************************
****************Analizador de un día contra un día*********************
***********************************************************************
***********************************************************************
***********************************************************************
****************************Diego GR***********************************
************************Alejandro Morales******************************
***********************************************************************
*/
/*
cd /home/monitec/Documentos/ProgramasEnC/ExtractorSimilares/
gcc -o a AnalisadorMuestras.c
./a '03-Costa Rica Nacional-20151031000000-50-0000-canal 7-0000-Servidor15-0000-0000.DAT'
./a '01-Costarica05-20151022071837-600-90.3 MHz-7E4885960-100-PCx_10_14-2-MNTC0023.dat'
*
* */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <dirent.h>
#include <time.h>
#define CANTIDAD_MAXIMA_RECONOCIMIENTOS 700
#define NUM_BYTES_FRECUENCY 10
#define NUM_FREC_COM 4
#define LEN_FREC_COM 32
#define NUM_INT_ESCALON 4
#define TAM_ESCALON 128
#define PORCENTAJE_BARRA_MINIMO_PERMITIDO 0.35f
#define PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO 0.8f
#define PUNTOS_ANALISIS_PRIMER_FILTRO 100
#define TAMANO_MINIMO_MUESTRA_SEGUNDOS 10
#define RESTRICCION_CERCANIA_SEGUNDOS 10
#define NUM_BYTES_SHORT 2
const float CONST_MEAN[] = {0.973f,0.93f,0.905f,0.9f,0};
typedef struct{
unsigned int nums[NUM_INT_ESCALON];
char bloqueado;
char vacio;
}entero128;
typedef struct{
unsigned int inicio;
unsigned int duracion;
}reconocimiento;
typedef struct{
unsigned int indice;
float porcentaje;
}coincidencia;
typedef struct{
unsigned int inicio;
unsigned int tamano;
int cantidadCoincidencias;
int cantidadReconocimientos;
coincidencia coincidencias[CANTIDAD_MAXIMA_RECONOCIMIENTOS];
reconocimiento reconocimientos[CANTIDAD_MAXIMA_RECONOCIMIENTOS];
}huella;
unsigned int multiplicadorTiempo = 1;
unsigned int SEGUNDOS_POR_DIA = 86400;
void analizar(const char * pRutaDat);
int fsize(const char *filename);
void leerSuperDat(const char * pPathDatIn, int lenBuffer,unsigned short ** sliceDAT);
void discretizarDAT(const int pLenBuffer, unsigned short **pSliceDAT, entero128 *pMatrizOUT);
void extraerMuestra(int pIndiceInicio, const int pTamano, entero128 *pMatrizOUT_DAT, huella *pHuella_Muestra);
void revisarHuella(int pIndiceInicio, const int pLenMatriz, entero128 *pMatrizOUT_DAT, huella * pHuella_Muestra, const int pLimite, const int pAvance, const float pPORCENTAJE);
int main( int arc, char **argv ){
analizar(argv[1]);
return 0;
}
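/*
 analizar: full pass over one .DAT capture. Reads the first four frequency bands,
 discretizes them into 128-bit step fingerprints, then slides a window of
 TAMANO_MINIMO_MUESTRA_SEGUNDOS seconds over the day and, for each window, searches the
 rest of the day for re-occurrences (revisarHuella), printing every match with its
 time of day and similarity percentage.
*/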
void analizar(const char * pRutaDat){
int lenBuffer = fsize(pRutaDat)/NUM_BYTES_FRECUENCY;
if(lenBuffer <= 126000){
SEGUNDOS_POR_DIA = 630;
}
int vEscalonAnalisis = 0, vIndMatriz, vTamMuestraDAT, vTamMuestraMatriz, vCantidadHuellas = 0, vIndCoincidencias = 0;
multiplicadorTiempo = lenBuffer/SEGUNDOS_POR_DIA;
unsigned short **sliceDAT;
sliceDAT = (unsigned short **)malloc(NUM_FREC_COM*sizeof(short*));
int vIndFrecuency;
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
sliceDAT[vIndFrecuency] = (unsigned short *)malloc(lenBuffer*sizeof(short));
}
leerSuperDat(pRutaDat, lenBuffer, sliceDAT);
int vlenMatriz = lenBuffer - LEN_FREC_COM;
entero128 * vMatrizOUT;
vMatrizOUT = (entero128 *)malloc(vlenMatriz*sizeof(entero128));
for(vIndMatriz = 0; vIndMatriz < vlenMatriz; vIndMatriz ++){
for(vEscalonAnalisis = 0; vEscalonAnalisis < NUM_INT_ESCALON; vEscalonAnalisis ++){
vMatrizOUT[vIndMatriz].nums[vEscalonAnalisis] = 15000;
}
}
discretizarDAT(lenBuffer, sliceDAT, vMatrizOUT);
vTamMuestraDAT = TAMANO_MINIMO_MUESTRA_SEGUNDOS * multiplicadorTiempo;
vTamMuestraMatriz = vTamMuestraDAT - LEN_FREC_COM;
huella * listaHuellas = (huella *)malloc((vlenMatriz/vTamMuestraDAT + 1) * sizeof(huella));
vIndMatriz = 655200;
clock_t startC = clock();
while(vIndMatriz < vlenMatriz - vTamMuestraDAT){
//printf("IND %d - %d , %d , %d\n", vIndMatriz,vlenMatriz - vTamMuestraMatriz,vTamMuestraMatriz, multiplicadorTiempo);
clock_t startComparador = clock();
/*if(listaHuellas[vCantidadHuellas].matriz == NULL)
listaHuellas[vCantidadHuellas].matriz = (entero128 *)malloc(vTamMuestraMatriz*sizeof(entero128));
*/
listaHuellas[vCantidadHuellas].tamano = vTamMuestraDAT;
listaHuellas[vCantidadHuellas].inicio = vIndMatriz;
const int vLimite = TAM_ESCALON*PORCENTAJE_BARRA_MINIMO_PERMITIDO*PUNTOS_ANALISIS_PRIMER_FILTRO;
int vAvance = vTamMuestraDAT/PUNTOS_ANALISIS_PRIMER_FILTRO;
if(vAvance == 0)
vAvance = 1;
revisarHuella(vIndMatriz, vlenMatriz - 12000, vMatrizOUT, &listaHuellas[vCantidadHuellas],vLimite,vAvance,PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO );
//printf("AFUERA -- Cantidad Coincidencias %d \n", listaHuellas[vCantidadHuellas].cantidadCoincidencias);
//revisarHuella(vIndMatriz , vlenMatriz, vMatrizOUT, &listaHuellas[vCantidadHuellas]);
if(listaHuellas[vCantidadHuellas].cantidadCoincidencias > 0){
printf("***************************************************************\nTiempo huella %d/%d = %f\n",vIndMatriz/multiplicadorTiempo,vlenMatriz/multiplicadorTiempo, ((double)clock() - startComparador)/CLOCKS_PER_SEC);
printf("Cantidad Coincidencias %d \n", listaHuellas[vCantidadHuellas].cantidadCoincidencias);
float tSeg = listaHuellas[vCantidadHuellas].inicio/multiplicadorTiempo;
float H_hora = tSeg/3600.0f;
float H_mins = (H_hora - (int)H_hora)*60;
float H_segs = (H_mins - (int)H_mins)*60;
for(vIndCoincidencias = 0; vIndCoincidencias < listaHuellas[vCantidadHuellas].cantidadCoincidencias; vIndCoincidencias ++){
float tSegC = listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].indice/multiplicadorTiempo;
float C_hora = tSegC/3600.0f;
float C_mins = (C_hora - (int)C_hora)*60;
float C_segs = (C_mins - (int)C_mins)*60;
printf("Huella %d/%d\t%d\t%d -> %d\t%d:%d:%d\t%d -> %d\t%d:%d:%d\t%f\n",vCantidadHuellas,vlenMatriz/vTamMuestraMatriz ,vIndCoincidencias,
listaHuellas[vCantidadHuellas].inicio,
listaHuellas[vCantidadHuellas].inicio/multiplicadorTiempo,
(int)H_hora,(int)H_mins,(int)H_segs,
listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].indice,
listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].indice/multiplicadorTiempo,
(int)C_hora,(int)C_mins,(int)C_segs,
listaHuellas[vCantidadHuellas].coincidencias[vIndCoincidencias].porcentaje);
}
vCantidadHuellas++;
            //GROW THE SAMPLE
}else{
int indMatriz;
for(indMatriz = 0; indMatriz < vTamMuestraMatriz; indMatriz ++){
if(!vMatrizOUT[vIndMatriz].vacio)
vMatrizOUT[vIndMatriz + indMatriz].bloqueado = 0;
}
}
//break;
vIndMatriz += vTamMuestraDAT;
}
int i;
for(i = 0 ; i < vCantidadHuellas;i++ ){
printf("Indice %d Coincidencias %d\t", i, listaHuellas[i].cantidadCoincidencias);
}
printf("\nTAM %d %d\n", lenBuffer, vCantidadHuellas);
}
/*
 *fsize: returns the size of a file in bytes.
 * IN: file name (const char *)
 * OUT: file size in bytes (int)
*/
int fsize(const char *filename) {
struct stat st;
if (stat(filename, &st) == 0)
return st.st_size;
return -1;
}
/*
leerSuperDat: reads the first four frequency bands of a DAT file
IN:
** pPathDatIn-> path of the DAT file (char *)
** lenBuffer-> length of each frequency band (int)
** sliceDAT-> output variable, matrix into which each band's samples are copied (unsigned short **)
*/
void leerSuperDat(const char* pPathDatIn, int lenBuffer,unsigned short ** sliceDAT){
FILE *vArchivo;
int vIndFrecuency;
vArchivo = fopen(pPathDatIn,"rb");
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
fread(sliceDAT[vIndFrecuency],NUM_BYTES_SHORT,lenBuffer,vArchivo);
}
fclose(vArchivo);
}
/*
discretizarDAT: converts the four raw frequency bands into 128-bit step fingerprints; for each
step, every band's 32-sample window is thresholded against its scaled mean (CONST_MEAN) to
build one 32-bit mask, and empty/sentinel steps are flagged
IN:
** pLenBuffer-> length of each frequency band (int)
** pSliceDAT-> data of each frequency band of the DAT (unsigned short **)
** pMatrizOUT-> output variable, matrix where the steps are stored (entero128 *)
*/
void discretizarDAT(const int pLenBuffer, unsigned short **pSliceDAT, entero128 *pMatrizOUT){
const int vLenMatriz = pLenBuffer - LEN_FREC_COM;
unsigned int vIndCantFrecu = 0;
int vElemIniEscalon = 0;
while(vElemIniEscalon < vLenMatriz){
//printf("ANTES DEL FOR %d - %d\n", vElemIniEscalon , vLenMatriz);
//unsigned int vEscalon[NUM_FREC_COM] = {0};
for(vIndCantFrecu = 0; vIndCantFrecu < NUM_FREC_COM;vIndCantFrecu ++){
unsigned short vArray32Frecu[LEN_FREC_COM] = {0};
unsigned short vAverageBlock = 0;
unsigned int vSumValuBlock = 0;
int vIndFrecuencyBlock = 0;
short vInd32Block = 0;
for(vInd32Block = 0; vInd32Block < LEN_FREC_COM;vInd32Block ++){
vArray32Frecu[vInd32Block]=pSliceDAT[vIndCantFrecu][vElemIniEscalon + vInd32Block];
vSumValuBlock += vArray32Frecu[vInd32Block];
}
            //Discretize the 32 values against the scaled block mean
float value = (((float)vSumValuBlock)/((float)LEN_FREC_COM))*CONST_MEAN[vIndCantFrecu];
vAverageBlock = (short)value;
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] = 0;
for(vIndFrecuencyBlock = 0; vIndFrecuencyBlock < LEN_FREC_COM;vIndFrecuencyBlock++){
if(vArray32Frecu[vIndFrecuencyBlock] > vAverageBlock){
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] <<= 1;
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] |= 0b1;
}
else
pMatrizOUT[vElemIniEscalon].nums[vIndCantFrecu] <<= 1;
}
}
//printf("ANTES DEL MEM %d - %d\n", vElemIniEscalon , vLenMatriz);
//memcpy(pMatrizOUT[vElemIniEscalon].nums,vEscalon,sizeof(vEscalon));
//printf("%d\t",pSliceDAT[0][vElemIniEscalon] );
if(pSliceDAT[0][vElemIniEscalon] == 65535 || pSliceDAT[0][vElemIniEscalon] == 15000|| pSliceDAT[0][vElemIniEscalon] == 39064)
pMatrizOUT[vElemIniEscalon].vacio = 1;
else
pMatrizOUT[vElemIniEscalon].vacio = 0;
pMatrizOUT[vElemIniEscalon].bloqueado = 0;
vElemIniEscalon ++;
}
}
/*
extraerMuestra: extracts the step matrix for a section of the DAT and marks those steps as blocked
IN:
** pIndiceInicio-> displacement index inside the DAT (int)
** pTamano-> size of the sample (const int)
** pMatrizOUT_DAT-> full discretized DAT (entero128 *)
** pHuella_Muestra-> output variable, sample fingerprint to populate (huella *)
NOTE: this comment block is never closed here, so the whole extraerMuestra definition below
remains commented out (and unused) until the closing marker after its body.
void extraerMuestra(int pIndiceInicio, const int pTamano, entero128 *pMatrizOUT_DAT, huella *pHuella_Muestra){
/*unsigned short **sliceMuestra;
sliceMuestra = (unsigned short **)malloc(NUM_FREC_COM*sizeof(short*));
int vIndFrecuency;
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
sliceMuestra[vIndFrecuency] = (unsigned short *)malloc(pTamano*sizeof(short));
memcpy(sliceMuestra[vIndFrecuency], pSliceDAT[vIndFrecuency] + pIndiceInicio, pTamano);
}
discretizarDAT(pTamano, sliceMuestra,pMatrizOUT);
for(vIndFrecuency = 0;vIndFrecuency<NUM_FREC_COM;vIndFrecuency++){
free(sliceMuestra[vIndFrecuency]);
}
free(sliceMuestra);
//printf("Copiamdo TAM %d, \n");
memcpy(pHuella_Muestra->matriz, pMatrizOUT_DAT + pIndiceInicio, pTamano*sizeof(entero128));
int indMatriz,vIndEscalon, vDiffHamming;
for(indMatriz = - multiplicadorTiempo; indMatriz < pTamano; indMatriz ++){
if(pIndiceInicio + indMatriz >= 0)
pMatrizOUT_DAT[pIndiceInicio + indMatriz].bloqueado = 1;
}
}
*/
/*
revisarHuella: takes one fingerprint and compares it against every candidate offset of the day,
sampling only PUNTOS_ANALISIS_PRIMER_FILTRO points of the window per offset (coarse first-pass filter)
IN:
** pIndiceInicio-> start index of the analysis
** pLenMatriz-> size of the comparison matrix
** pMatrizOUT_DAT-> matrix with the DAT steps (entero128 *)
** pHuella_Muestra-> fingerprint under analysis (huella *)
** pLimite-> maximum accumulated Hamming distance accepted for a candidate (int)
** pAvance-> stride between sampled points inside the window (int)
** pPORCENTAJE-> minimum similarity fraction required to record a match (float)
*/
/*
*
//printf("indices %d %d\n",pIndiceInicio, pLenMatriz);
//const int vLimite = TAM_ESCALON*PORCENTAJE_BARRA_MINIMO_PERMITIDO;
//printf("DESBLOQUEADA %d\n",vIndMatriz);
for(vIndMatriz_Huella = 0; vIndMatriz_Huella < pHuella_Muestra->tamano; vIndMatriz_Huella += vAvance){
if(!pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].bloqueado && !pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].vacio && vIndMatriz + vIndMatriz_Huella !=pHuella_Muestra->inicio + vIndMatriz_Huella ){
vDiffHamming = 0;
for(vIndEscalon = 0; vIndEscalon < NUM_INT_ESCALON; vIndEscalon ++){
vDiffHamming += __builtin_popcount(pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].nums[vIndEscalon] ^ pMatrizOUT_DAT[pHuella_Muestra->inicio + vIndMatriz_Huella].nums[vIndEscalon]);
}
if(vDiffHamming < vLimite){
vCantidadValidas ++;
printf("%d %d\n",vIndMatriz + vIndMatriz_Huella , pHuella_Muestra->inicio + vIndMatriz_Huella );
}
}else{
//printf("BLOQUEADA %d\n",vIndMatriz);
}
}
vPorcentajeSimilitud = (float)vCantidadValidas/PUNTOS_ANALISIS_PRIMER_FILTRO;
if(vPorcentajeSimilitud >= PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO){
printf("Limite %f Porcentaje %f validas %d %d\n", PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO,vPorcentajeSimilitud ,vCantidadValidas, pHuella_Muestra->cantidadCoincidencias);
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].indice = vIndMatriz;
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].porcentaje = vPorcentajeSimilitud;
pHuella_Muestra->cantidadCoincidencias = (pHuella_Muestra->cantidadCoincidencias + 1)%CANTIDAD_MAXIMA_RECONOCIMIENTOS;
vIndMatriz += TAMANO_MINIMO_MUESTRA_SEGUNDOS*multiplicadorTiempo;
}
*/
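/*
 The kernel below runs one thread per candidate offset idx: for each of the NUM_MINI_HUELLAS
 "mini fingerprints" it XORs the four 32-bit masks of the DAT step at idx + indice against the
 stored masks and accumulates the popcount, writing the total Hamming distance to sumOUT[idx].
 It is not launched anywhere in this file; revisarHuella further down performs the equivalent
 first-pass filtering on the CPU.

 ASSUMPTION: the types num128bitsInt / miniHuella and the constant NUM_MINI_HUELLAS are not
 defined in this file. The definitions below are reconstructed from how the kernel uses them
 so that the file compiles; they may differ from the author's original definitions.
*/
#ifndef NUM_MINI_HUELLAS
#define NUM_MINI_HUELLAS PUNTOS_ANALISIS_PRIMER_FILTRO /* assumed: one mini fingerprint per sampled point */
#endif
typedef struct{
    unsigned int Num[NUM_INT_ESCALON];      /* same 4 x 32-bit layout as entero128.nums */
}num128bitsInt;
typedef struct{
    unsigned int indice;                    /* offset of this mini fingerprint inside the sample window */
    unsigned int huella[NUM_INT_ESCALON];   /* the 4 x 32-bit masks at that offset */
}miniHuella;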
__global__ void revisarHuella_Cuda(int N, num128bitsInt *pMatrizHuella, miniHuella huellas[], int *sumOUT)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
int suma = 0;
int val1, val2, val3, val4;
for (int indMiniHuella = 0; indMiniHuella < NUM_MINI_HUELLAS; indMiniHuella++){
val1 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[0] ^ huellas[indMiniHuella].huella[0]);
val2 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[1] ^ huellas[indMiniHuella].huella[1]);
val3 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[2] ^ huellas[indMiniHuella].huella[2]);
val4 = __popc(pMatrizHuella[idx+huellas[indMiniHuella].indice].Num[3] ^ huellas[indMiniHuella].huella[3]);
suma += val1 + val2 + val3 + val4;
}
sumOUT[idx] = suma;
}
}
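/*
 Illustrative sketch (not called anywhere in this program): one possible way to drive
 revisarHuella_Cuda from the host. The function name and argument names are assumptions;
 only the kernel signature above is taken from the original code. N is the number of
 candidate offsets (threads), matrixLen the total number of steps in pMatrizHuella_h,
 with matrixLen large enough that idx + huellas[i].indice stays in bounds.
*/
void revisarHuella_Cuda_launch_sketch(int N, int matrixLen, num128bitsInt *pMatrizHuella_h, miniHuella *pHuellas_h, int *pSumOUT_h){
    num128bitsInt *dMatriz = NULL;
    miniHuella *dHuellas = NULL;
    int *dSum = NULL;
    /* device copies of the fingerprint matrix, the mini fingerprints, and one sum per offset */
    cudaMalloc((void**)&dMatriz, (size_t)matrixLen*sizeof(num128bitsInt));
    cudaMalloc((void**)&dHuellas, NUM_MINI_HUELLAS*sizeof(miniHuella));
    cudaMalloc((void**)&dSum, (size_t)N*sizeof(int));
    cudaMemcpy(dMatriz, pMatrizHuella_h, (size_t)matrixLen*sizeof(num128bitsInt), cudaMemcpyHostToDevice);
    cudaMemcpy(dHuellas, pHuellas_h, NUM_MINI_HUELLAS*sizeof(miniHuella), cudaMemcpyHostToDevice);
    /* one thread per candidate offset */
    const int blockSize = 256;
    const int gridSize = (N + blockSize - 1)/blockSize;
    revisarHuella_Cuda<<<gridSize, blockSize>>>(N, dMatriz, dHuellas, dSum);
    cudaDeviceSynchronize();
    /* the per-offset Hamming sums come back to the host; thresholding against pLimite and
       the similarity percentage would then proceed as in revisarHuella below */
    cudaMemcpy(pSumOUT_h, dSum, (size_t)N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dMatriz);
    cudaFree(dHuellas);
    cudaFree(dSum);
}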
void revisarHuella(int pIndiceInicio, const int pLenMatriz, entero128 *pMatrizOUT_DAT, huella * pHuella_Muestra, const int pLimite, const int pAvance, const float pPORCENTAJE){
/*
const int vLimite = TAM_ESCALON*PORCENTAJE_BARRA_MINIMO_PERMITIDO*PUNTOS_ANALISIS_PRIMER_FILTRO;
int vAvance = pHuella_Muestra->tamano/PUNTOS_ANALISIS_PRIMER_FILTRO;
if(vAvance == 0)
vAvance = 1;
*/
int vIndMatriz, vIndMatriz_Huella, vIndEscalon;
pHuella_Muestra->cantidadCoincidencias = 0;
int vDiffHamming;
float vPorcentajeSimilitud;
    /* one accumulated Hamming distance per candidate offset; heap-allocated because a stack
       VLA of pLenMatriz ints (one per DAT step, potentially millions) could overflow the stack */
    int *vArregloDiffHamming = (int *)malloc((size_t)pLenMatriz*sizeof(int));
    if(vArregloDiffHamming == NULL)
        return;
///
for(vIndMatriz =pIndiceInicio; vIndMatriz < pLenMatriz - pHuella_Muestra->tamano; vIndMatriz ++){
vDiffHamming = 0;
for(vIndMatriz_Huella = 0; vIndMatriz_Huella < pHuella_Muestra->tamano; vIndMatriz_Huella += pAvance){
for(vIndEscalon = 0; vIndEscalon < NUM_INT_ESCALON; vIndEscalon ++){
vDiffHamming += __builtin_popcount(pMatrizOUT_DAT[vIndMatriz + vIndMatriz_Huella].nums[vIndEscalon] ^ pMatrizOUT_DAT[pHuella_Muestra->inicio + vIndMatriz_Huella].nums[vIndEscalon]);
}
}
vArregloDiffHamming[vIndMatriz] = vDiffHamming;
//break;
}
///
//printf("Suma Hamming %d \t Limite %d \n",vDiffHamming, vLimite );
for(vIndMatriz =pIndiceInicio; vIndMatriz < pLenMatriz - pHuella_Muestra->tamano; vIndMatriz ++){
if((vIndMatriz < pHuella_Muestra->inicio - RESTRICCION_CERCANIA_SEGUNDOS*multiplicadorTiempo ||
vIndMatriz > pHuella_Muestra->inicio + RESTRICCION_CERCANIA_SEGUNDOS*multiplicadorTiempo)
&& vIndMatriz !=pHuella_Muestra->inicio ){
            if(!pMatrizOUT_DAT[vIndMatriz].bloqueado && !pMatrizOUT_DAT[vIndMatriz].vacio){ /* check the start of the candidate window; vIndMatriz_Huella is stale here (left over from the loop above) */
if(vArregloDiffHamming[vIndMatriz] < pLimite){
vPorcentajeSimilitud = 1 - (float)vArregloDiffHamming[vIndMatriz]/(float)(TAM_ESCALON*PUNTOS_ANALISIS_PRIMER_FILTRO);
if( vPorcentajeSimilitud > pPORCENTAJE){
//printf("Limite %f Porcentaje %f validas %d %d\n", PORCENTAJE_MINIMO_COINCIDENCIAS_PERMITIDO,vPorcentajeSimilitud ,vCantidadValidas, pHuella_Muestra->cantidadCoincidencias);
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].indice = vIndMatriz;
pHuella_Muestra->coincidencias[pHuella_Muestra->cantidadCoincidencias].porcentaje = vPorcentajeSimilitud;
pHuella_Muestra->cantidadCoincidencias = (pHuella_Muestra->cantidadCoincidencias + 1)%CANTIDAD_MAXIMA_RECONOCIMIENTOS;
//vIndMatriz += TAMANO_MINIMO_MUESTRA_SEGUNDOS*multiplicadorTiempo;
}
}
}
}
}
//printf("Coincidencias encontradas = %d \n", pHuella_Muestra->cantidadCoincidencias);
    free(vArregloDiffHamming);
}
|